diff --git a/.gitattributes b/.gitattributes index 7455a1f18c55630a1a17b20c8fe8792d3201562e..7f4f329cd726c7c254f78bca6ad08f4cd2f9ee05 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1656,3 +1656,11 @@ data/2025/2503_13xxx/2503.13861/24ebc1ba-af7b-4d3e-a5d9-ba11158e223d_origin.pdf data/2025/2503_13xxx/2503.13881/f8ea68e7-ed15-4c1e-a85c-7872bf8b0c7c_origin.pdf filter=lfs diff=lfs merge=lfs -text data/2025/2503_13xxx/2503.13933/abcf6c14-6474-4c8a-adec-45f736d3be15_origin.pdf filter=lfs diff=lfs merge=lfs -text data/2025/2503_14xxx/2503.14350/f51fef62-e3ca-4f33-b47c-8b3a779fe535_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2503_12xxx/2503.12933/52570aa5-5dd9-4c45-a08c-c1948885d88d_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2503_12xxx/2503.12937/939affdd-0491-441c-956b-3cebb8540abd_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2503_12xxx/2503.12952/bbaeb81a-2f2e-42c2-84ea-7ec1e03fb00a_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2503_12xxx/2503.12972/66f6c6f6-89fc-4fc3-8936-54da38b8d574_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2503_13xxx/2503.13107/91b69939-46cd-4d75-af9c-6d9f6dccd58b_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2503_13xxx/2503.13139/d822d65e-325f-4d59-9422-a1e3da2df18a_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2503_13xxx/2503.13195/86c7a688-40c1-4477-ae2d-59118e3bfc36_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2506_12xxx/2506.12103/2704209e-62a6-4e6a-a67a-5d895054a41f_origin.pdf filter=lfs diff=lfs merge=lfs -text diff --git a/data/2025/2503_12xxx/2503.12933/52570aa5-5dd9-4c45-a08c-c1948885d88d_content_list.json b/data/2025/2503_12xxx/2503.12933/52570aa5-5dd9-4c45-a08c-c1948885d88d_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..05fb5652db1d7ec839814acce4dec397a28e60fa --- /dev/null +++ b/data/2025/2503_12xxx/2503.12933/52570aa5-5dd9-4c45-a08c-c1948885d88d_content_list.json @@ -0,0 +1,2600 @@ +[ + { + "type": "text", + "text": "Empath-D: VR-based Empathetic App Design for Accessibility", + "text_level": 1, + "bbox": [ + 102, + 101, + 893, + 127 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Wonjung Kim\\* wjkim@nclab.kaist.ac.kr KAIST", + "bbox": [ + 145, + 138, + 313, + 186 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Kenny Tsu Wei Choo kenny.choo.2012@smu.edu.sg Singapore Management University", + "bbox": [ + 380, + 138, + 617, + 188 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Youngki Lee \nyoungkilee@smu.edu.sg \nSingapore Management University", + "bbox": [ + 648, + 138, + 885, + 188 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Archan Misra", + "bbox": [ + 305, + 199, + 421, + 214 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "archanm@smu.edu.sg", + "bbox": [ + 289, + 215, + 439, + 231 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Singapore Management University", + "bbox": [ + 246, + 232, + 480, + 246 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Rajesh Krishna Balan", + "bbox": [ + 544, + 199, + 720, + 215 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "rajesh@smu.edu.sg", + "bbox": [ + 566, + 217, + 699, + 232 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Singapore Management University", + "bbox": [ + 514, + 232, + 750, + 246 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ABSTRACT", + "text_level": 1, + "bbox": [ + 83, + 255, + 184, + 268 + ], + "page_idx": 0 + }, + { + "type": "text", + 
"text": "With app-based interaction increasingly permeating all aspects of daily living, it is essential to ensure that apps are designed to be inclusive and are usable by a wider audience such as the elderly, with various impairments (e.g., visual, audio and motor). We propose Empath-D, a system that fosters empathetic design, by allowing app designers, in-situ, to rapidly evaluate the usability of their apps, from the perspective of impaired users. To provide a truly authentic experience, Empath-D carefully orchestrates the interaction between a smartphone and a VR device, allowing the user to experience simulated impairments in a virtual world while interacting naturally with the app, using a real smartphone. By carefully orchestrating the VR-smarphone interaction, Empath-D tackles challenges such as preserving low-latency app interaction, accurate visualization of hand movement and low-overhead perturbation of I/O streams. Experimental results show that user interaction with Empath-D is comparable (both in accuracy and user perception) to real-world app usage, and that it can simulate impairment effects as effectively as a custom hardware simulator.", + "bbox": [ + 81, + 273, + 483, + 523 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "CCS CONCEPTS", + "text_level": 1, + "bbox": [ + 83, + 535, + 220, + 549 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "- Human-centered computing $\\rightarrow$ Systems and tools for interaction design; Ubiquitous and mobile computing systems and tools; Accessibility design and evaluation methods; Accessibility systems and tools; Ubiquitous and mobile computing design and evaluation methods;", + "bbox": [ + 81, + 553, + 483, + 625 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "KEYWORDS", + "text_level": 1, + "bbox": [ + 83, + 635, + 191, + 648 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "empathetic design; accessibility; mobile design; virtual reality; multi-device, distributed user interfaces", + "bbox": [ + 81, + 654, + 483, + 681 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ACM Reference Format:", + "text_level": 1, + "bbox": [ + 83, + 686, + 230, + 698 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Wonjung Kim, Kenny Tsu Wei Choo, Youngki Lee, Archan Misra, and Rajesh Krishna Balan. 2018. Empath-D: VR-based Empathetic App Design for Accessibility. In MobiSys '18: The 16th Annual International Conference on Mobile", + "bbox": [ + 81, + 699, + 482, + 738 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "*This work was done while the author was on an internship at Singapore Management University", + "bbox": [ + 83, + 747, + 482, + 770 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for components of this work owned by others than ACM must be honored. Abstracting with credit is permitted. To copy otherwise, or republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee. 
Request permissions from permissions@acm.org.", + "bbox": [ + 81, + 779, + 482, + 852 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "MobiSys '18, June 10-15, 2018, Munich, Germany", + "bbox": [ + 83, + 852, + 310, + 863 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "© 2018 Association for Computing Machinery.", + "bbox": [ + 83, + 864, + 303, + 875 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ACM ISBN 978-1-4503-5720-3/18/06...$15.00", + "bbox": [ + 83, + 875, + 294, + 883 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "https://doi.org/10.1145/3210240.3210331", + "bbox": [ + 83, + 883, + 272, + 895 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/afc2aa0c18463b9d417e5b5637078d0b8fe1fa5610f226957679066ce8a41372.jpg", + "image_caption": [ + "Figure 1: Overview of Empath-D" + ], + "image_footnote": [], + "bbox": [ + 521, + 263, + 903, + 441 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Systems, Applications, and Services, June 10-15, 2018, Munich, Germany. ACM, New York, NY, USA, 13 pages. https://doi.org/10.1145/3210240.3210331", + "bbox": [ + 513, + 501, + 915, + 527 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 INTRODUCTION", + "text_level": 1, + "bbox": [ + 514, + 544, + 687, + 556 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Digital interactions have become increasingly commonplace and immersive. We now constantly interact with our personal devices and computing-enhanced ambient objects (such as coffeemakers, home automation systems and digital directories), while engaging in everyday activities, such as commuting, shopping or exercising. Given the ubiquity of such interactions, it is important to ensure that the associated computing interfaces remain accessible to segments of the population, such as the elderly, who suffer from various impairments. The global elderly population is projected to reach $16.7\\%$ by 2050 [33], and such users suffer disproportionately from impairments (e.g., vision) that hinder accessibility.", + "bbox": [ + 511, + 561, + 913, + 714 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "To support more accessible design, our earlier work [11] introduced the vision of Empath-D, which uses a virtual reality (VR) device to provide mobile application/object designers with a realistic emulation of the interaction experience that impaired users would encounter. In this work, we present the design, implementation and validation of the Empath-D system inspired by this vision. Empath-D's goal is to allow unimpaired application designers to step into the shoes of impaired users and rapidly evaluate the usability of alternative prototypes.
While we shall principally focus on empathetic evaluation of mobile applications (apps), Empath-D's design is generic enough to permit emulation of other real-world interactions-e.g., how an elderly user with cataracts and hearing loss would experience a traffic-light controlled pedestrian intersection.", + "bbox": [ + 511, + 715, + 915, + 895 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2503.12933v1 [cs.HC] 17 Mar 2025", + "bbox": [ + 22, + 258, + 57, + 705 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Empath-D's $^1$ key idea is to present the user with an impairment-augmented view of the smartphone interface (or other digital objects) in a virtual world, while allowing the non-impaired user to perform natural interactions, using a physical smartphone, with a real-world instance of the smartphone app. At a high-level, Empath-D works as follows (see Figure 1): The (unimpaired) user uses a physical smartphone to perform real-world interactions (such as scrolls, taps or gestures) with the app, while wearing a VR device. The results of such interactions are projected instantaneously through the I/O interfaces (e.g., screen, speaker) of a 'virtual smartphone' visible in the VR display, but only after those I/O streams have been appropriately degraded by the specified impairment. For example, in Figure 1, the virtual phone's display (and the world view) has been appropriately vignetted, to mimic the experience of a user suffering from glaucoma.", + "bbox": [ + 81, + 106, + 482, + 313 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Key Challenges: To mimic impairments with adequate fidelity and usability, Empath-D must support the following features:", + "bbox": [ + 81, + 314, + 482, + 340 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Fast, Accurate Multi-device Operation: Empath-D utilizes a split-interaction paradigm: a user interacts with an app using a real-world handheld smartphone, while perceiving (viewing, hearing) the app responses through the VR interface. To faithfully replicate the real-world experience, this split-mode interaction must have tight time coupling and visual fidelity (of the virtual phone's screen), comparable to direct interactions with a standalone smartphone.", + "- Real-time Tracking: To preserve a user's perception of naturalistic interactions, Empath-D must not only capture explicit phone events, but also mirror physical actions taken by the user (e.g., swinging the phone around or having one's hand hover over the phone). Thus, Empath-D must also track and render, in real-time, the orientation/location of both the phone and the user's hand within the VR device's field-of-view.", + "- Lightweight Impairment Execution: To preserve the feel of natural interaction, Empath-D must insert the impairment-specific perturbations into the input/output streams with imperceptible latency or computational overhead (e.g., no reduction in video frame rate)."
+ ], + "bbox": [ + 109, + 343, + 482, + 642 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Key Contributions: We make the following major contributions:", + "bbox": [ + 83, + 643, + 482, + 656 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- 3-Tier Virtualisation Model: We design a novel 3-tier architecture where (i) the real-world smartphone serves merely as a tracker, forwarding user interaction events (e.g., screen touch and gestures) to a computationally powerful intermediary, after which (ii) the intermediary device perturbs those events by blending in specific input impairments (e.g., hand tremors) and passes them to an app instance running on a smartphone emulator, and finally (iii) the VR device receives the redirected outputs from this app instance and renders an appropriately-impaired (by blending in the output impairments) virtual world, including a virtual smartphone.", + "- Real-time Hand and Phone Tracking: We use an RGB-Depth camera, mounted on the head-worn VR device, to track the outline of a user's hand, and subsequently perform a lightweight but realistic 3-D rendering of the hand on the VR" + ], + "bbox": [ + 109, + 657, + 482, + 869 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "display. We also use fiducial marker tracking [14] by the camera to track the position/orientation of the real-world smartphone. We demonstrate our ability to achieve both high-fidelity (pointing error $\\leq 5\\,mm$ ) and low-latency (end-to-end delays below 120 msec) hand tracking and display.", + "bbox": [ + 553, + 106, + 913, + 176 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Usability of Virtualized Phone, in Use Environments: We show that Empath-D is not just usable, but that user performance (absent impairments) using Empath-D's virtual smartphone is equivalent to real-world interaction with a smartphone. In addition, we allow usability testing of apps in their use environments, a key enabler for the design of mobile applications which may be used anywhere. Our Samsung Gear VR-based prototype has end-to-end latency low enough (only 96.3 msec, excluding the mobile app emulation) to permit faithful reproduction of direct smartphone usage.", + "- Validation of Impairment Fidelity and Overall System: We implement two distinct vision (glaucoma & cataract) and one audio (high-frequency hearing loss) impairment in our Empath-D prototype. We then conduct a set of studies using the vision impairments, where 12 participants perform a series of standardised activities (e.g., add an alarm), using both our Empath-D prototype (test) and a commercial hardware vision impairment simulator (control), and establish that the performance of users is equivalent across the test and control groups. Finally, we conduct a small-scale study to provide preliminary evidence that our empathetic approach allows developers to design accessible mobile UIs faster and better." + ], + "bbox": [ + 540, + 180, + 913, + 491 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 THE EMPATH-D VISION", + "text_level": 1, + "bbox": [ + 514, + 506, + 750, + 520 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We use an example to illustrate the use of Empath-D:", + "bbox": [ + 513, + 523, + 836, + 537 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Designing for Visual Impairment.
Alice is designing a mobile app that automatically magnifies text from real environments seen through its rear camera to aid people who suffer from cataracts (a condition that dims and blurs vision). Alice starts Empath-D and is presented with a web interface that allows her to customise impairments (e.g., specify the intensity of visual blur). After customising the environment, Alice clicks in the Empath-D web interface to (1) compile the environment to her phone used for VR display (VR-phone)2 and (2) connect an input/output service to a separate phone (IO-phone). She then plugs the VR-phone into the VR headset.", + "bbox": [ + 513, + 539, + 913, + 676 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Alice then compiles her Android app, and runs it in the Android emulator. She puts on the VR headset and holds the IO-phone in her hands. A virtual smartphone (Virt-phone) shows up in VR, tracking the real-world motion of the IO-phone. Alice now navigates through the virtual world, experiencing it as an \"impaired user, with cataracts\". She holds up the IO-phone on a street corner (in the real world), and notices that the magnified text (as seen in the virtual phone in the virtual world) is not clear enough to be legible to a cataract-impaired user. She can now iteratively and rapidly modify her app, recompile it, and execute it in the Android emulator, until she is satisfied with the output. This scenario demonstrates the ease-of-use of Empath-D, with no need for special instrumentation of the app.", + "bbox": [ + 513, + 676, + 913, + 843 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "MobiSys '18, June 10-15, 2018, Munich, Germany", + "bbox": [ + 83, + 71, + 387, + 87 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Wonjung Kim et al.", + "bbox": [ + 790, + 71, + 913, + 85 + ], + "page_idx": 1 + }, + { + "type": "page_footnote", + "text": "1Video of Empath-D in action at https://is.gd/empath_d", + "bbox": [ + 84, + 882, + 346, + 895 + ], + "page_idx": 1 + }, + { + "type": "page_footnote", + "text": "2The VR-phone is needed only for VR devices that require a smartphone-e.g., Samsung Gear VR", + "bbox": [ + 513, + 869, + 913, + 892 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3 SYSTEM OVERVIEW", + "text_level": 1, + "bbox": [ + 83, + 104, + 285, + 119 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1 Design Goals and Implications", + "text_level": 1, + "bbox": [ + 83, + 125, + 375, + 141 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Empath-D has the following key goals, which directly influence the salient implementation choices.", + "bbox": [ + 81, + 143, + 480, + 171 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Holistic emulation of impairments: For a truly empathetic experience, the app designer must perceive the effects of impairments not just while using the mobile app, but throughout her immersion in the virtual world. Consider a user, suffering from cataract, who is interacting with her smartphone while attending a dimly lit dinner gathering. Simply blurring the phone display, while leaving the background illumination and focus unchanged, might not replicate challenges in visual contrast that an impaired user would face in reality. This requirement precludes the straightforward use of I/O redirection techniques such as Rio [8], which can potentially perturb the I/O streams of only the mobile device.
Instead, the impairment must be applied holistically, to the entire virtual world.", + "- Realistic emulation of smartphone and mobile apps in the virtual world: Empath-D aims at realistically emulating mobile apps within the virtual world rendered by a commodity VR headset. Realistic emulation of mobile apps imposes two requirements. (a) First, the virtual smartphone should have sufficient visual resolution, corresponding to typical usage where the smartphone is held $\\approx 30\\mathrm{cm}$ away from the eye. We shall see (in Section 6.3) that this requirement, coupled with differences in display resolutions between smartphones and VR devices, requires careful magnification of the virtual smartphone to provide legibility without hampering usage fidelity. (b) Second, the user should not perceive any lag between her user input and the rendered view of the app, seen through the VR device. Quantitatively, we thus require that the task completion time, experienced by a user interacting with the emulated application in the virtual world, should be comparable to real-world app usage on a real smartphone.", + "- Use of unmodified app: For easy and low-overhead adoption by app designers, Empath-D should support the emulation of mobile applications using the original, unmodified binaries (e.g., .apk for Android). Empath-D's requirement to support empathetic emulation without app modifications implies that app designers would be able to adopt Empath-D with minimal impact to existing development practices.", + "- Low-latency, accurate finger tracking: This goal is an extension of the holistic emulation objective. In the real-world, users utilise instantaneous visual feedback and proprioception to move their fingers around the smartphone display, even when they are hovering but not actually touching the display. To ensure consistency between the user's tactile, visual and proprioceptive perceptions of her hand movement, Empath-D should also realistically render, in the virtual world, the user's hand movements and any changes in the position/orientation of the real-world smartphone, without any perceptible lag. In Section 6, we shall see how the Empath-D implementation meets these stringent performance bounds." + ], + "bbox": [ + 109, + 184, + 482, + 888 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/bd83a371d793f92d12e8cfe908d31d13c777cba3ad2142f6936b839ce6a0da4b.jpg", + "image_caption": [ + "Figure 2: Empath-D architecture" + ], + "image_footnote": [], + "bbox": [ + 521, + 103, + 911, + 383 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "- Light-weight, effective emulation of impairments: Empath-D will need to emulate impairments at different levels of severity. For high-fidelity empathetic emulation, the insertion of such impairments in the I/O streams of the smartphone should not generate any additional artefacts (e.g., increased latency, reduction in display refresh rate, etc.).", + "bbox": [ + 540, + 434, + 931, + 516 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2 System Overview", + "text_level": 1, + "bbox": [ + 514, + 531, + 699, + 546 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We now present the overview of the Empath-D system (illustrated in Figure 2).", + "bbox": [ + 513, + 549, + 913, + 577 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Using Empath-D in VR.
To immersively evaluate the application, the developer (or the tester) starts by installing her developed application binaries (i.e., Android .apks) to run on the emulated smartphone. The developer then adjusts the profile settings for the impairment using Empath-D's web dashboard and selects a use case scenario (e.g., in office, in the street, etc.). She holds her physical smartphone and puts on the VR headset, earphones (when hearing impairments are involved) and experiences the immersive reality (where she can use the app - now mapped onto the physical smartphone - with the configured impairment under the designated use case scenario) that Empath-D generates. She then tests out various interfaces and functionalities of the app in the immersive VR environments.", + "bbox": [ + 511, + 577, + 913, + 755 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Components of Empath-D. Empath-D runs across three different physical devices: a physical smartphone, a computer, and a VR device (see Figure 2).", + "bbox": [ + 513, + 757, + 913, + 797 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Smartphone: In Empath-D, the user interacts with the app using a real smartphone held in her hand. Interestingly, this smartphone does not run the app itself, but functions as a tracking device, helping to preserve the user's realistic sense of smartphone interaction. The smartphone simply redirects the user interaction events (e.g., touch events such as clicks and swipes on the display and motion events captured by inertial sensors) to the computer, which is in", + "bbox": [ + 511, + 799, + 913, + 896 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "Empath-D: VR-based Empathetic App Design for Accessibility", + "bbox": [ + 83, + 71, + 460, + 87 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "MobiSys '18, June 10-15, 2018, Munich, Germany", + "bbox": [ + 611, + 73, + 913, + 87 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "charge of the app emulation. This smartphone also displays a fiducial marker array [14] on its display, to help in efficient, real-time tracking of the phone's location.", + "bbox": [ + 83, + 106, + 480, + 147 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Computer: The computer is at the heart of Empath-D's ability to fuse the real and virtual world. It consists of two major components: Phone and Hand Tracker and Mobile Emulator, as well as a Web Dashboard (see Figure 6), which allows the user to select the impairment profile to be applied. In addition, as we shall discuss shortly, this computer may run an Impairment Generator cum Virtual World Renderer. Key functions include:", + "bbox": [ + 83, + 148, + 480, + 244 + ], + "page_idx": 3 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- The Phone and Hand Tracker uses images captured by the VR headset-mounted camera to track the position and pose of the smartphone (relative to the VR device), and create the virtual phone image at the correct position in the virtual world. It also uses the same camera to track the user's hand, as it interacts with the smartphone, and then renders it in the virtual world.", + "- The Mobile Emulator executes the app being tested, using the redirected stream of user interaction events transmitted by the smartphone.
The resulting visual output of the app is then transmitted as a sequence of images to the VR device, where these images will be integrated into the virtual phone object; likewise, audio output (if any) is directly streamed to the VR device." + ], + "bbox": [ + 109, + 244, + 480, + 443 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The overall Empath-D framework includes an Impairment Generator that is typically applied as one or more filters over the Virtual World Renderer (an engine such as Unity [44], which is responsible for combining various virtual objects and rendering the virtual world). The Impairment Generator effectively perturbs/modifies the audio/video feeds of the virtual world, before they are displayed on the VR device. For example, to emulate cataracts, it applies an appropriate 'blurring/dimming' filter on the video feed; similarly, to emulate high-frequency hearing loss (an audio impairment), this generator will apply a low-pass filter on the output audio stream. These two components are placed inside a dotted-line rectangle in Figure 2, to reflect the reality that these components run on either the Computer or the VR device, depending on whether the VR device is tethered or not. In untethered VR devices (such as the Samsung Gear VR), the Impairment Generator and the Virtual World Renderer run on the VR device itself. In contrast, with tethered devices such as the HTC Vive, they run on the computer, which typically offers higher graphics quality, higher frame rates, and faster execution.", + "bbox": [ + 81, + 444, + 482, + 691 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "VR Device: Finally, the VR device is used to display the synthesised virtual world to the user. This synthesis involves the fusion of the virtual smartphone, the user's hand and the ambient virtual world, all subject to the impairment filter.", + "bbox": [ + 81, + 693, + 482, + 750 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4 VR-BASED EMULATION OF MOBILE INTERACTION", + "text_level": 1, + "bbox": [ + 83, + 762, + 415, + 792 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Empath-D follows a split-interaction paradigm: for realistic immersion, Empath-D renders the visual and audio output of the target app in the virtual world (i.e., via the VR headset's display and speakers), while allowing the user to interact naturalistically with a real-world physical phone. A major challenge in this paradigm is to enable natural, low-latency tracking and display of the real-world motion of both the phone and the user's hands, so as to ensure consistency", + "bbox": [ + 81, + 797, + 482, + 893 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "across the user's visual, tactile and proprioceptive experience. We achieve this by performing three distinct steps: (a) smartphone tracking, (b) hand tracking, and (c) hand rendering in VR, using an RGB-Depth (RGB-D) camera mounted on the VR headset. Empath-D first tracks the position and orientation of the physical smartphone and synchronises the position of the virtual phone to the physical smartphone (See Section 4.1).
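To make the event-redirection path described in Section 3.2 concrete, here is a minimal relay sketch for the intermediary computer. This is an illustration, not Empath-D's actual implementation: the JSON-over-TCP wire format, the port number, the emulator serial and the adb-based injection are all assumptions.

import json
import socket
import subprocess

EMULATOR_SERIAL = "emulator-5554"   # hypothetical emulator id

def inject_touch(event):
    """Replay one forwarded touch event into the Android emulator via adb."""
    if event["kind"] == "tap":
        cmd = ["adb", "-s", EMULATOR_SERIAL, "shell", "input", "tap",
               str(event["x"]), str(event["y"])]
    else:  # swipe
        cmd = ["adb", "-s", EMULATOR_SERIAL, "shell", "input", "swipe",
               str(event["x0"]), str(event["y0"]),
               str(event["x1"]), str(event["y1"]), str(event["ms"])]
    subprocess.run(cmd, check=True)

def serve(port=9000):
    """Accept the IO-phone's connection and relay its events, one JSON object per line."""
    with socket.create_server(("", port)) as srv:
        conn, _ = srv.accept()
        with conn, conn.makefile("r") as stream:
            for line in stream:
                inject_touch(json.loads(line))

if __name__ == "__main__":
    serve()

One JSON object per line keeps framing trivial; a production version would also forward the inertial-sensor stream and batch events to respect the latency budget discussed in Section 7.1.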
Separately from the phone tracking, Empath-D also captures fingers in the real world and displays them at the correct position (relative to the virtual smartphone) in the virtual world (See Sections 4.2 and 4.3).", + "bbox": [ + 511, + 106, + 913, + 244 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/2d0c925cd5b5c961e0517affb584f4170fe5ce6aae91f40748c0cd5792f14f77.jpg", + "image_caption": [ + "Figure 3: Tracking physical phone with fiducial markers" + ], + "image_footnote": [], + "bbox": [ + 584, + 263, + 843, + 465 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Empath-D uses the headset-mounted RGB-D camera to capture the colour image along with the depth values, relative to the camera. The camera's position is always fixed, relative to the user's head. Its three axes are thus aligned to a user's head: the $z$ -axis to the user's forward (gaze) direction, and the $x$ and $y$ axes capturing the horizontal and vertical displacement.", + "bbox": [ + 511, + 516, + 913, + 599 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.1 Tracking the physical smartphone", + "text_level": 1, + "bbox": [ + 513, + 613, + 838, + 630 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Empath-D uses fiducial markers, displayed on the physical smartphone's screen, to localise the smartphone efficiently. It takes a colour image as input, and returns the smartphone's transformation relative to the RGB-D camera's coordinate system: translation and rotation, i.e., x, y, z, roll, pitch, yaw. We employ a technique proposed and detailed in [14].", + "bbox": [ + 511, + 631, + 913, + 714 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The Empath-D Phone and Hand Tracker component tracks the physical phone using markers captured by the camera. Each marker, displayed on the phone screen, has a distinct pattern. The tracker knows the position of each marker (e.g., top-left, top-right, bottom-left and bottom-right) in the physical smartphone screen's coordinate system. The system first detects these markers in a given colour image, identifying them based on their unique patterns (see Figure 3). In particular, the system recognises the coordinates of each of the four corners of each marker. Moreover, the system knows the true size of, and separation between, each marker. It then uses an object pose estimation algorithm (provided by openCV's solvePnP function [6]), along with the array of fiducial marker points, to compute the 3-D position and orientation of the smartphone. Past", + "bbox": [ + 511, + 714, + 913, + 893 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "MobiSys '18, June 10-15, 2018, Munich, Germany", + "bbox": [ + 84, + 73, + 385, + 87 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "Wonjung Kim et al.", + "bbox": [ + 792, + 73, + 911, + 85 + ], + "page_idx": 3 + }, + { + "type": "code", + "sub_type": "algorithm", + "code_caption": [ + "Algorithm 1 Hand Segmentation" + ], + "code_body": "1: Input: $T\\gets$ Phone's translation (3-D vector) \n2: Input: $R\\gets$ Phone's orientation ($3\\times 3$ rotation matrix) \n3: Input: $F\\gets$ RGBD Frame, a 2-D array in which each entry $F_{i,j}$ holds a colour value and 3-D position relative to the camera.
\n4: Input: $V\gets$ 3-D region of interest (relative to the phone) \n5: Output: fgMask, a 2-D bool array whose dimensions equal those of $F$ \n6: \n7: fgMask[i,j] $\\leftarrow$ false for all $(i,j)$ \n8: for point $(i,j)$ in $F$ do \n9: if $(i,j)$ in screen_border then \n10: /\\* Case A: Blue background segmentation \\*/ \n11: fgMask[i,j] $\\leftarrow$ $1 - Blue(F_{i,j}) + 0.5\\cdot Red(F_{i,j}) > \\tau$ \n12: else \n13: /\\* Case B: Depth-based segmentation \\*/ \n14: posphone $\\leftarrow$ $R^{-1}\\cdot (Position(F_{i,j}) - T)$ \n15: fgMask[i,j] $\\leftarrow$ (posphone $\\in V$ ) \n16: end if \n17: end for \n18: return fgMask", + "bbox": [ + 93, + 125, + 480, + 388 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "results [14] show that this technique can compute an object's position and orientation with sub-cm level accuracy.", + "bbox": [ + 81, + 429, + 482, + 457 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "This fiducial marker-based algorithm would fail under two conditions: (a) when the markers are occluded by the user's hand, and (b) if the ambient illumination levels are too low or too high, reducing the contrast level of the markers. To tackle (a), the smartphone displays an entire array of markers across the screen, thereby ensuring correct smartphone tracking as long as some part of the phone is visible. Contrast concerns are not particularly relevant in our scenario, as we assume that the user is testing the app in a regularly lit work/office environment.", + "bbox": [ + 81, + 457, + 482, + 583 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.2 Hand Segmentation", + "text_level": 1, + "bbox": [ + 83, + 598, + 289, + 614 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Empath-D uses the frames captured by the RGB-D camera to track and segment the user's hand. For each frame, we extract the segment (polygon of pixels) that represents the user's hand, and render that segment in the virtual world. As the goal of hand-tracking is to provide the user with a natural view of her smartphone interactions, we restrict the tracking technique to a 3-D region of interest (ROI) that is centred at the phone, with a depth of $2cm$ and a planar boundary of $6cm$ . In other words, we only track the hand while it is $\\leq 2\\,cm$ away from the smartphone screen, and within $\\leq 6\\,cm$ of the smartphone edges.", + "bbox": [ + 81, + 616, + 482, + 755 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "A straightforward approach is to apply a depth-based segmentation strategy, where we first isolate only the foreground points which lie within a depth of $2cm$ of the smartphone surface. However, we empirically observed that, due to the glossy surface of the smartphone, such depth estimation was inaccurate for points located on the smartphone's screen.
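To make the Section 4.1 pose computation concrete, the following sketch shows the solvePnP step; the marker-corner coordinates and camera intrinsics are illustrative placeholders rather than Empath-D's calibrated values.

import cv2
import numpy as np

# 3-D corners of one on-screen marker in the phone's coordinate system
# (metres); z = 0 since the markers lie on the screen plane. Empath-D
# uses a whole marker array, which adds more correspondences.
object_points = np.array([
    [0.00, 0.00, 0.0], [0.02, 0.00, 0.0],
    [0.02, 0.02, 0.0], [0.00, 0.02, 0.0],
], dtype=np.float32)

# Matching 2-D corner pixels detected in the colour frame.
image_points = np.array([
    [312.0, 201.0], [350.0, 203.0],
    [348.0, 241.0], [310.0, 239.0],
], dtype=np.float32)

# Assumed pinhole intrinsics of the RGB-D camera, from calibration.
K = np.array([[615.0, 0.0, 320.0],
              [0.0, 615.0, 240.0],
              [0.0, 0.0, 1.0]], dtype=np.float32)

ok, rvec, tvec = cv2.solvePnP(object_points, image_points, K, None)
R, _ = cv2.Rodrigues(rvec)   # R and tvec play the roles of R and T in Algorithm 1
print("phone translation (m):", tvec.ravel())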
To work around this glossy-screen issue, we implemented two separate segmentation methods (detailed in Algorithm 1): (case A) a colour-based segmentation approach to identify points which are directly over the smartphone, and (case B) a depth-based approach to identify points which are near, but not over, the smartphone's", + "bbox": [ + 81, + 755, + 482, + 893 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/f48189648e06342f7107918350098ce5353869f4381a97f5a5c27617bfa2b7bc.jpg", + "image_caption": [ + "Figure 4: Mesh of hand" + ], + "image_footnote": [], + "bbox": [ + 542, + 103, + 705, + 268 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/b3e9e489695a4a9946ad9fb6d24d204879d2ca00e03d4c16e01c599e24768049.jpg", + "image_caption": [ + "Figure 5: Empath-D hand segmentation" + ], + "image_footnote": [], + "bbox": [ + 718, + 103, + 890, + 268 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "screen. We apply the colour-based segmentation to the points inside the screen's border (thick orange contour in Figure 3) and the depth-based approach to the points outside.", + "bbox": [ + 513, + 337, + 913, + 378 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Colour-based segmentation: We adopt the colour-based technique proposed in [41]. The approach tests RGB values to segment the foreground (hand) from a background coloured blue. In our scenario, we target human skin as the foreground. Human skin has a property common to all races: its R value is about twice the value of G and B ($R \\approx 2G \\approx 2B$). Given this property of human skin, we obtain a formula that discriminates the foreground from a background whose $B$ value is 1 (line 11 in Algorithm 1). $\\tau$ is a user-tunable threshold which allows the test to adapt to different lighting conditions.", + "bbox": [ + 513, + 378, + 913, + 503 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "However, note that, to enable tracking of the phone, the phone's screen cannot be completely blue, but will need to contain the array of fiducial markers. We satisfy both requirements simultaneously by using blue ($R = 0$, $G = 0$, $B = 1$) to colour the markers, over a cyan ($R = 0$, $G = 1$, $B = 1$) background. Here we modified only the $G$ value, which is unused in the colour-based segmentation.", + "bbox": [ + 513, + 503, + 913, + 585 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Points outside the smartphone's screen are segmented using the depth-based approach. After identifying the points corresponding to the user's hand, the system translates these points to 3-D coordinates in the camera's coordinate system, using the associated depth values.", + "bbox": [ + 511, + 585, + 913, + 655 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.3 Rendering the hand in the virtual world", + "text_level": 1, + "bbox": [ + 514, + 667, + 883, + 684 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "After detecting the hand segment, the Empath-D system renders it in the virtual world. The system passes the tracked hands to the Virtual World Renderer, sharing the (i) 3D structure of the hands (surface mesh), (ii) colour image of the RGB-D frame (texture), and (iii) mapping between the surface mesh and the colour image (UV map). In common rendering engines (e.g., Unity), the 3D structure of the hand is represented by a triangle mesh-i.e., a set of vertices, constituting individual small triangles. The mesh is rendered at the same location as the user's hand in the real world. A sketch of this mesh construction is given below.
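Here is that sketch, using scipy's Delaunay triangulation to stand in for the renderer-side implementation; array and parameter names are illustrative.

import numpy as np
from scipy.spatial import Delaunay

def hand_mesh(fg_mask, points_3d, frame_w, frame_h):
    """fg_mask: HxW bool foreground mask from Algorithm 1;
    points_3d: HxWx3 camera-space positions from the RGB-D frame."""
    ys, xs = np.nonzero(fg_mask)              # hand pixels in image space
    pix = np.stack([xs, ys], axis=1)
    tri = Delaunay(pix)                       # 2-D triangulation of the segment
    vertices = points_3d[ys, xs]              # lift each vertex to 3-D
    uv = pix / np.array([frame_w, frame_h])   # texture coordinates into colour frame
    return vertices, tri.simplices, uv        # mesh + UV map for the renderer

The three returned values correspond to items (i)-(iii) above: the surface mesh, its triangles, and the UV map used to texture the mesh with the real image of the hand.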
As the user's hand is localised in the coordinates of the RGB-D depth camera, the location is offset by an additional depth value (7cm in our implementation), to reflect the additional distance between the centre of the user's eyes and the depth camera. An important characteristic of our algorithm is that we render the actual image of the user's hands over this triangle mesh. Figure 4 illustrates the Delaunay", + "bbox": [ + 511, + 686, + 913, + 895 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "Empath-D: VR-based Empathetic App Design for Accessibility", + "bbox": [ + 84, + 71, + 460, + 87 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "MobiSys '18, June 10-15, 2018, Munich, Germany", + "bbox": [ + 611, + 73, + 913, + 87 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "triangulation of a set of points. The mesh is combined with the hand's image (Figure 5), and rendered in the VR display. Extracting and rendering the actual image of the user's finger enhances the immersive feeling of real-life smartphone navigation in the virtual world.", + "bbox": [ + 81, + 361, + 482, + 430 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The complexity of the mesh-i.e., the number of vertices (or triangles) in the rendered hand-is an important parameter in the rendering process. A larger number of vertices captures the contours of the hand more precisely, resulting in a more life-like image. However, this also results in added rendering latency in the rendering engine. To support the twin objectives of low-latency and life-like rendering, we utilise a sub-sampling technique to construct the mesh. Specifically, Empath-D preserves all the points on the edges of the segment, to preserve the precise contours of the hand. However, it performs a 32-fold downsampling of the interior points (prior to constructing the Delaunay triangulation), along both the row and column axes, to reduce the computational time significantly, without materially affecting the reconstructed hand image. We shall show, in Section 6, how our prototype Empath-D implementation uses this technique to achieve our twin objectives.", + "bbox": [ + 81, + 431, + 482, + 640 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5 IMPAIRMENT SIMULATION", + "text_level": 1, + "bbox": [ + 83, + 654, + 348, + 667 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Empath-D aims to enable evaluation of the usability of app designs under visual, auditory and haptic impairment simulation.
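As an aside, the dashboard settings shown in Figure 6 amount to an impairment profile along the following lines; the schema is hypothetical, and only the field values echo the screenshot.

profile = {
    "cataract": {
        "enabled": True,
        "blur_intensity": 0.1,        # reduced visual acuity
        "contrast_reduction": 1.0,    # reduced contrast sensitivity
    },
    "glaucoma": {
        "enabled": False,
        "inner_radius": 0.25,         # clear central circle
        "outer_radius": 0.5,          # black beyond this radius
    },
    "hearing_loss": {
        "enabled": False,
        "stop_band_hz": (2000, 8000),
    },
}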
Realistic simulation of various impairments in the VR world is the essential requirement to achieve this goal.", + "bbox": [ + 81, + 672, + 482, + 728 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "There has been a thread of research to simulate impairments through physical simulator devices [1, 13, 29, 39, 49]. For instance, Zimmerman et al. use goggles and enclosing materials to simulate low vision impairments [49]. These hardware simulators generalise the impairment of interest and enable simulation of specific aspects of the impairment pathology, rather than emulating exactly how an impairment manifests. However, impairments can vary greatly between individuals. For instance, glaucoma generally progresses in deterioration from the periphery towards the centre of vision, but in reality, it comes in different shapes and severities, affecting the usability of applications in different ways. Existing physical impairment simulators simply approximate this as a central circle of", + "bbox": [ + 81, + 728, + 482, + 893 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/b377111a3890b5b21d82f0ce87f6713e12d99dbe61914f2f616d4f3b1f803405.jpg", + "image_caption": [ + "Figure 6: Screenshot of Empath-D impairment configuration dashboard" + ], + "image_footnote": [], + "bbox": [ + 522, + 103, + 707, + 242 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/11e5b5ede7aadf764283bae50216204001ff4629a008d7bccfe665359f2a217c.jpg", + "image_caption": [ + "Figure 7: Simulated cataract (left) and simulated glaucoma (right)" + ], + "image_footnote": [], + "bbox": [ + 718, + 103, + 903, + 242 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "clarity, with blur through to the periphery. Empath-D is advantageous over existing physical simulators in the following ways: it allows 1) impairments to be customised, 2) simultaneous manifestation of multiple impairments, and 3) new impairments to be added easily. Figure 6 shows the web interface for designers to customise impairments for the target user group.", + "bbox": [ + 513, + 329, + 913, + 412 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.1 Simulating Visual Impairments", + "text_level": 1, + "bbox": [ + 514, + 431, + 813, + 446 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Vision is the dominant sensory system by which humans perceive the world, and is a key focus for Empath-D. Vision impairment is one of the most common causes of accessibility problems that come with age. Common vision impairments include cataracts, glaucoma, and age-related macular degeneration. Such vision impairments present as reduced visual acuity, loss of central/peripheral vision, or decreased contrast sensitivity. It is well documented that these symptoms can affect the interaction with various desktop and mobile applications; for example, humans use peripheral vision to pre-scan text ahead of their point of focus. As peripheral vision narrows, the scanning becomes less effective, which slows reading [23]. In this work, we examine and simulate two commonly found visual impairments - cataracts and glaucoma.", + "bbox": [ + 511, + 449, + 913, + 628 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Our approach is to apply an image effect at the \"eye\" (i.e., a camera pair of view renderers) of the VR scene. From this camera pair, the image effect applies to all other objects in the scene (e.g., smartphone, fingers, scenery), just as impaired users would experience it.
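The Unity shader code itself is not listed here; as a stand-in, equivalent OpenCV post-processing sketches of the two filters follow, with all parameter values illustrative.

import cv2
import numpy as np

def cataract(frame, blur=0.1, contrast=0.5):
    """Reduced visual acuity (Gaussian blur) plus reduced contrast sensitivity."""
    k = max(1, int(blur * 50)) | 1                   # odd kernel size from intensity
    out = cv2.GaussianBlur(frame, (k, k), 0).astype(np.float32)
    mean = out.mean()                                # pull values towards the mean
    return np.clip(mean + contrast * (out - mean), 0, 255).astype(np.uint8)

def glaucoma(frame, inner=0.25, outer=0.5):
    """Clear centre fading to black towards the periphery (vignette)."""
    h, w = frame.shape[:2]
    yy, xx = np.mgrid[0:h, 0:w]
    r = np.hypot(xx - w / 2, yy - h / 2) / np.hypot(w / 2, h / 2)
    visible = np.clip((outer - r) / (outer - inner), 0.0, 1.0)
    return (frame * visible[..., None]).astype(np.uint8)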
We employed various image filters for the different impairments; these 1) render the impairments realistically, helping designers find usability issues and take corrective actions, and 2) have a computational overhead small enough not to add noticeable delays to the overall emulation.", + "bbox": [ + 511, + 630, + 913, + 753 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The approach is flexible and lightweight. Each impairment simulator's intensity is configurable at runtime. The image effects are applied at the last stage of the rendering pipeline. Glaucoma presents functionally as a loss in peripheral vision. To simulate glaucoma, we use a vignette with a clear inner circle, a blurred band between the inner and outer circles, and black extending outwards from the outer circle (see Figure 7). Cataracts present functionally as reduced visual acuity and reduced contrast sensitivity. We use a blur filter to simulate reduced visual acuity, and a contrast reduction filter to simulate reduced contrast sensitivity (see Figure 7).", + "bbox": [ + 511, + 753, + 913, + 893 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "MobiSys '18, June 10-15, 2018, Munich, Germany", + "bbox": [ + 84, + 71, + 385, + 85 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "Wonjung Kim et al.", + "bbox": [ + 792, + 73, + 911, + 85 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/6410f07cb949eca3b22ee97fdcbbad013a03f757c41dd81b3492a8c8d7258e5a.jpg", + "table_caption": [ + "Table 1: Hardware of Empath-D" + ], + "table_footnote": [], + "table_body": "
<table><tr><td>VR headset</td><td>Samsung Gear VR [5]</td></tr><tr><td>VR smartphone</td><td>Samsung Galaxy S7 [4]</td></tr><tr><td>RGB-D camera</td><td>Intel RealSense SR300 [20]</td></tr><tr><td>PC</td><td>CPU: 4 cores, 3.4 GHz<br>RAM: 16 GB<br>GPU: GeForce GTX 1080 [32]</td></tr><tr><td>Physical IO smartphone</td><td>Samsung Galaxy S5 [40]</td></tr></table>
", + "bbox": [ + 102, + 132, + 459, + 234 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The functional aspects of vision impairments are straightforward to create in VR, which give Empath-D high extendability to implement other types of visual impairments. While we just described two impairments pertaining to our studies, it is easy to create other impairments such as colour filters to simulate colour blindness. However, we leave the effect of eye movements on impairments as the future work. Since eye-tracking is currently not supported in Empath-D, a user will need to move his head to achieve the same effect.", + "bbox": [ + 81, + 258, + 482, + 383 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5.2 Simulating Other Modalities", + "text_level": 1, + "bbox": [ + 83, + 396, + 357, + 412 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We discuss how other modalities may be simulated in Empath-D.", + "bbox": [ + 81, + 414, + 475, + 428 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Hand Tremors. Hand tremors are a common symptom of Parkinson's disease or Essential tremor and make it hard for one to precisely point on a touchscreen. A hand tremor may be characterised by the frequency and amplitude of oscillatory movement. Since we present virtual representations of the user's hand (i.e., as a 3D mesh) to enable his interaction with the virtual mobile phone, Empath-D similarly perturbs this 3D mesh in VR to create hand tremors. While a user may physically not experience hand movement, the visual perturbation would be sufficient to hinder accurate touch to simulate hand tremors.", + "bbox": [ + 81, + 429, + 482, + 565 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Hearing Loss. High-frequency hearing loss is a common symptom for the elderly population. People diagnosed with high-frequency hearing loss are unable to hear sounds between $2,000\\mathrm{Hz}$ and 8,000 Hz. These people often struggle to understand or keep up with daily conversations (missing consonants in higher registers, such as the letters F and S or female voices). Empath-D applies a bandpass filter over the output sound of the target application to diminish the sound signals between $2\\mathrm{kHz}$ and $8\\mathrm{kHz}$ and plays the filtered audio feed through the VR device.", + "bbox": [ + 81, + 566, + 486, + 691 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "6 IMPLEMENTATION", + "text_level": 1, + "bbox": [ + 83, + 704, + 277, + 718 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "6.1 Hardware", + "text_level": 1, + "bbox": [ + 83, + 724, + 209, + 738 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We implemented our current Empath-D prototype using the hardware described in Table 1. We used the Samsung Gear VR fitted with the Samsung Galaxy S7 as the VR headset. We used the Intel RealSense SR300 RGB-D camera for finger tracking, selecting this among alternatives as: 1) its small size and low weight allowed us to easily attach it to the VR headset, and 2) its minimum sensing range is low enough to permit hand tracking at a distance of $30\\mathrm{cm}$ . We employed the Samsung Galaxy S5 as the physical I/O device, and a powerful laptop (4 core 3.4 GHz CPU, 16GB RAM) as the intermediary device. The choice of the VR headset itself was deliberate. 
We chose a Samsung Gear VR headset (an untethered", + "bbox": [ + 81, + 742, + 482, + 895 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/1dcb64b45b7edead9f0980680ddcb16e04ceb7b4ff22f89c3ac8e10075db7d63.jpg", + "image_caption": [ + "Figure 8: Rendering frame rate under varying virtual display resolution (width : height = 9 : 16, default resolution of Android emulator is 1080x1920)" + ], + "image_footnote": [], + "bbox": [ + 612, + 106, + 818, + 250 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "smartphone-powered VR device) over more powerful PC-tethered VR devices such as the HTC Vive or Oculus Rift. This was mainly because PC-tethered devices such as the HTC Vive use IR lasers to localise the headset, which interfere with the IR laser emitted by the RGB-D camera used for depth sensing in hand tracking.", + "bbox": [ + 513, + 335, + 913, + 407 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "6.2 Rendering an Emulated App", + "text_level": 1, + "bbox": [ + 514, + 419, + 790, + 436 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We used empirical studies to determine an appropriate screen resolution and frame rate to render the emulated app (and the smartphone) in the VR headset. Empath-D obtains screenshots of its mobile emulator using the Android virtual display [35] and transmits these screenshots over WiFi to the Gear VR device. The overhead of transmitting and rendering these emulated screenshots is proportional to their resolution. The default 1080p resolution could sustain a frame rate of only 18 fps, which causes visible jerkiness. To reduce this overhead, we reduced the resolution (using the setDisplayProjection() method), and applied differential transmission, sending a screenshot only when the emulated app's display changes.", + "bbox": [ + 511, + 438, + 915, + 603 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Figure 8 shows the experimental results on the tradeoff between the resolution and the rendering frame rate, obtained while playing a video to ensure continuous change of the screen content. The frame rate saturates at $57~\\text{fps}$ , at a screen resolution of $485\\times 863$ . Moreover, through another user study (described next) to understand the minimum resolution needed to read an app's contents, we empirically verified that the participants had no issues in reading the app's content at the resolution of $485\\times 863$ . Hence, we chose this resolution as our default, although this setting can be modified (e.g., we can pick a higher resolution, and a lower frame rate, for an app with mostly static content).", + "bbox": [ + 511, + 604, + 915, + 755 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "If Empath-D displays the virtual smartphone at its original size in the virtual world (portrait position), its display becomes illegible. For example, the Samsung Galaxy S7 (in the Gear VR) has a resolution of $2560 \\times 1440$ and an $\\approx 101^{\\circ}$ horizontal field of view, yielding a horizontal pixel density of $\\approx 25.3$ pixels/degree. When a virtual phone is held $30\\mathrm{cm}$ away, the horizontal pixel density drops below 25.3 pixels/degree due to downsampling of the virtual phone screen as seen through the VR display.
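These density figures can be reproduced with a little arithmetic; note that the 6.35 cm screen width below is derived from the Galaxy S5's 5.1-inch 1080x1920 panel, an assumption rather than a number quoted in the text.

import math

vr_ppd = 2560 / 101.0   # Gear VR: 2560 px across ~101 degrees -> ~25.3 px/deg

width_cm, dist_cm = 6.35, 30.0   # assumed S5 screen width; viewing distance
fov_deg = 2 * math.degrees(math.atan(width_cm / 2 / dist_cm))   # ~12.1 degrees
phone_ppd = 1080 / fov_deg       # ~89.4 px/deg, the figure quoted below

print(round(vr_ppd, 1), round(phone_ppd, 1))   # 25.3 89.4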
This drop presents a problem for viewing the content of the virtual phone - in particular, text - as its pixel density is significantly lower than when viewing a physical", + "bbox": [ + 511, + 756, + 915, + 895 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "Empath-D: VR-based Empathetic App Design for Accessibility", + "bbox": [ + 84, + 71, + 460, + 87 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "MobiSys '18, June 10-15, 2018, Munich, Germany", + "bbox": [ + 611, + 71, + 913, + 87 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/7dc207ba8205ccdb334a933ebb68c6cfbb0d9d987fb5854960854ec9b06f67ca.jpg", + "image_caption": [ + "Figure 9: Readable font size of the virtual smartphone at a magnification ratio" + ], + "image_footnote": [], + "bbox": [ + 101, + 106, + 472, + 238 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "phone. For instance, the Galaxy S5 gives $\\approx 89.4$ pixels/degree at a $30\\mathrm{cm}$ distance.", + "bbox": [ + 81, + 306, + 480, + 333 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We tackle this issue by scaling up the virtual phone's size by a factor that ensures that the phone's display text remains legible. To determine this factor, we recruited three participants and asked them to record the minimum readable font sizes, while showing them a virtual smartphone (at a distance of $30~\\mathrm{cm}$ ) with various magnification ratios (increased by 0.1 from 1.0 to 2.7). Figure 9 shows that participants could read text with the font size = 12sp (the commonly used minimum font size for mobile apps) for magnification factors $\\geq 1.5$ . Accordingly, we used 1.5 as the default magnification ratio for the smartphone and its display. We also proportionately scaled up the user's rendered hand. User studies (Section 7) show that users found this configuration highly usable.", + "bbox": [ + 81, + 334, + 482, + 501 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6.3 Rendering Virtual Hand", + "text_level": 1, + "bbox": [ + 83, + 512, + 326, + 527 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "As discussed in Section 4.3, the rendering latency of the virtual hand is proportional to the number of vertices in the Delaunay triangulation-based mesh. To reduce the latency, we apply a nonuniform sampling approach. Specifically, Empath-D preserves all the points on the edges of the segment, to preserve the precise contours of the hand. However, it performs a downsampling of the interior points (prior to constructing the Delaunay triangulation), along both the $x$ and $y$ axes, to reduce the computational time significantly, without materially affecting the reconstructed hand image. We empirically determined the sampling rate $X$ , by varying $X$ and measuring both (i) the processing latency and (ii) the SSIM [12, 46] (Structural SIMilarity; a metric of perceived image quality) of the hand images, using 200 RGB-D frames. Figure 10 shows the results. Without any subsampling ( $X = 0\\%$ ), the rendering latency is 311.1 msec, which is too high for our responsiveness goal. We empirically downsample the internal pixels by a factor of 32 ( $X = 99.9\\%$ ), i.e., choosing every $32^{nd}$ pixel on the grid.
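A sketch of this edge-preserving subsampling follows; the erosion-based contour extraction is one reasonable implementation, not necessarily Empath-D's.

import numpy as np
from scipy import ndimage

def subsample_segment(fg_mask, step=32):
    """Keep all contour pixels of the hand segment, but only every
    step-th interior pixel along each axis (Section 6.3)."""
    interior = ndimage.binary_erosion(fg_mask)
    edges = fg_mask & ~interior            # precise hand contour, fully kept
    lattice = np.zeros_like(fg_mask)
    lattice[::step, ::step] = True         # sparse grid over the interior
    return edges | (interior & lattice)    # vertex mask fed to the triangulation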
This results in a latency of 26.9 msec, while keeping the SSIM = 0.976, a level indistinguishable from the original as perceived by a human.", + "bbox": [ + 81, + 531, + 482, + 796 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6.4 Environment Emulation", + "text_level": 1, + "bbox": [ + 83, + 806, + 326, + 821 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "To enable holistic evaluation of app interactions, Empath-D emulates not just the virtual phone, but the entire virtual world as well. In our current implementation, we emulated a crowded Urban Street environment, which includes crosswalks, traffic lights, pedestrians and commonplace roadside obstacles. To further mimic real-world", + "bbox": [ + 81, + 825, + 482, + 896 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/ed586f433128be2511fc2fdefb49a6de472fe84bdb9ff54f955321bebd4e5d7b.jpg", + "image_caption": [ + "Figure 10: Rendering latency vs. image quality of the virtual hand" + ], + "image_footnote": [], + "bbox": [ + 547, + 106, + 885, + 242 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "movement, our implementation allows the user to navigate the virtual world by (i) rotating her head (this uses the head tracking ability of the VR device), and (ii) 'walking in place', using the technique proposed in [45], as this does not require any additional hardware on the VR device.", + "bbox": [ + 511, + 309, + 913, + 378 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6.5 VR Manager", + "text_level": 1, + "bbox": [ + 514, + 390, + 661, + 406 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "This component, implemented as a Unity application, currently executes on the VR smartphone; it combines and renders the output of the various components (Hand Tracker, Phone Tracker and Virtual Phone) in the virtual world. It is also responsible for applying the impairments to the output of the virtual world. The image effects simulating low vision impairments are implemented as Shader scripts in Unity.", + "bbox": [ + 511, + 407, + 913, + 520 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "7 EVALUATION", + "text_level": 1, + "bbox": [ + 514, + 531, + 661, + 545 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We now present a mix of system and user experiments to evaluate the performance and efficacy of our Empath-D implementation. Besides micro-benchmark studies, we conducted two experiments to capture user interaction with Empath-D. In Experiment 1, we examine the performance of Empath-D vs. a real-world smartphone, in the absence of any impairments. In Experiment 2, we consider an impairment-augmented version of Empath-D, comparing the performance of users against the use of commercial impairment simulation hardware.", + "bbox": [ + 511, + 549, + 913, + 675 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "7.1 Micro-benchmark Performance of Empath-D", + "text_level": 1, + "bbox": [ + 514, + 686, + 839, + 719 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We measured the overall latency of Empath-D, both in terms of the delay in reflecting touch interactions in the virtual world and in terms of the hand tracking delay.", + "bbox": [ + 511, + 720, + 913, + 763 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "7.1.1 End-to-end Latency of Touch Interaction. 
As a measure of the overall responsiveness of Empath-D, we computed the latency between a touch input, on the physical smartphone, and the resulting change in the content of the virtual smartphone, rendered in the VR display. To measure this, we utilised a high-framerate camera (operating at 240 fps) to concurrently record both the screen of the physical smartphone and the virtual phone (displayed in the VR). The phone screen was initially coloured green, and was programmed to turn red as soon as it received a touch input. We repeated the", + "bbox": [ + 511, + 771, + 913, + 896 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "MobiSys '18, June 10-15, 2018, Munich, Germany", + "bbox": [ + 84, + 71, + 387, + 87 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "Wonjung Kim et al.", + "bbox": [ + 792, + 73, + 911, + 85 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/f4fadf609d2c2cfe536a1d5dee2467428b592246da75a7f0e69d720eb5161084.jpg", + "image_caption": [ + "Figure 11: Overhead of impairment simulation" + ], + "image_footnote": [], + "bbox": [ + 114, + 106, + 449, + 224 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "measurement 23 times, capturing (via the video frames) the time gap between (i) the physical smartphone screen turning red and (ii) the virtual smartphone turning red in the VR display. The end-to-end latency was 237.70 msec ( $SD = 20.43$ ).", + "bbox": [ + 81, + 285, + 480, + 339 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "By monitoring the intermediary computer, we obtained the breakdown of this delay: (i) smartphone responsiveness (the time from the user touching the screen till the time the phone transmits the touch event to the computer) $= 0.3$ msec $(SD = 0.16)$ ; (ii) computer emulation responsiveness (the time from receiving the touch event till the time the screenshot of the modified display is sent to the VR device) $= 141.37$ msec $(SD = 6.6)$ , and (iii) the VR responsiveness (the time from receiving the screenshot till it is rendered on the VR display) $= 10.46$ msec $(SD = 8.36)$ . The remaining latency ( $\approx 87$ msec) can be attributed to WiFi network latency. These micro-measurements suggest that the default Android emulator used in our studies was the dominant component of the latency. The default Android emulator is known to be fairly slow, and multiple third-party emulators (e.g., Genymotion [16]) are reported to provide significantly lower latency. Accordingly, we anticipate that this overall latency can be reduced to $\leq 150$ msec, without any significant architectural modification of Empath-D.", + "bbox": [ + 81, + 340, + 482, + 575 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "7.1.2 End-to-end Latency of Virtual Hand. We also evaluated the latency between the physical movement of the user's hand and the rendering of this movement in the VR display. To capture this time difference, we displayed a small circle, at a specific point on the display, on both the smartphone and the virtual phone. Users were instructed to swipe a finger on the screen to reach the circle. We measured, over 20 experiments, the time (no. of frames from the previously used high-framerate camera) between the occlusion of the circle on the physical phone and the resulting occlusion in the virtual phone, computing an average latency of $117.46\mathrm{msec}$ ( $SD = 20.44$ ). 
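At 240 fps, each frame of gap corresponds to 1000/240, i.e. roughly 4.17 msec; a minimal sketch of this frame-counting conversion (Python; variable names are ours):

```python
CAMERA_FPS = 240  # high-framerate camera used for all recordings

def latency_msec(physical_frame: int, virtual_frame: int) -> float:
    """Convert the frame gap between an event appearing on the physical
    phone and the same event appearing on the virtual phone into msec."""
    return (virtual_frame - physical_frame) * 1000.0 / CAMERA_FPS

# e.g. a 28-frame gap corresponds to ~116.7 msec
print(latency_msec(physical_frame=1000, virtual_frame=1028))
```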
Additionally, we measured the component delays of this rendering process as: (i) reading an RGBD frame: $4.90\\mathrm{msec}$ ( $SD = 0.58$ ); (ii) phone tracking: $4.56\\mathrm{msec}$ ( $SD = 0.25$ ); (iii) hand tracking: $8.0\\mathrm{msec}$ ( $SD = 1.58$ ), and (iv) the VR responsiveness (the time from receiving the hand mesh till it is rendered on the VR display): $26.99\\mathrm{msec}$ ( $SD = 5.22$ ). The remaining latency, attributable to the WiFi network, is $\\approx 73\\mathrm{msec}$ , consistent with the measurements reported above.", + "bbox": [ + 81, + 585, + 482, + 834 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "7.2 Study Design for Usability Experiments", + "text_level": 1, + "bbox": [ + 83, + 848, + 449, + 864 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "We then conducted user studies on the usability and real-world fidelity of our Empath-D implementation. The user study (approved", + "bbox": [ + 81, + 866, + 482, + 895 + ], + "page_idx": 8 + }, + { + "type": "table", + "img_path": "images/f63e1c9d8dc06359caf5a4b12930ff0b0d5eaf774c06aa36bff6f66e42c719c4.jpg", + "table_caption": [ + "Table 2: Study Tasks and Conditions in Experiment 1" + ], + "table_footnote": [], + "table_body": "
TaskConditionImpairmentSimulator TypeEnvironment
T1-T4AnonenoneReal
BCataractsPhysicalReal
CnonenoneVirtual
DCataractsVirtualVirtual
EGlaucomaPhysicalReal
FGlaucomaVirtualVirtual
", + "bbox": [ + 535, + 132, + 888, + 247 + ], + "page_idx": 8 + }, + { + "type": "table", + "img_path": "images/061f389c91dff21d715784e13d2bb92b0ce1df9f862751e39e580c598af0aa23.jpg", + "table_caption": [ + "Table 3: Smartphone Interaction Tasks in Experiment 1" + ], + "table_footnote": [], + "table_body": "
Task TypeTask CodeTask Description
Everyday Phone UseT1Perform a Calculation
T2Add an Alarm
T3Search, Save Image on Browser
Controlled PointingT4Number Search and Point
", + "bbox": [ + 542, + 287, + 883, + 391 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "by our institution's IRB) consisted of 12 users (9 males) with no pre-existing uncorrected vision impairments. Users were aged 24-39, with a mean age of 30.3 years $(\\mathrm{SD} = 5)$ .", + "bbox": [ + 511, + 411, + 913, + 452 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Study Tasks and Measures. We adopted a repeated measures design, with participants counterbalanced for condition order (see Table 2 for the conditions). Participants were asked to perform four different tasks split into two task types; everyday phone use, and controlled pointing (see Table 3). Users were asked to perform all tasks using two-handed interaction, holding the phone at a distance that they normally would during daily use. We chose two-handed interaction to eliminate for phone balancing that is typical in one-handed interaction given the typical size of today's smartphones.", + "bbox": [ + 511, + 452, + 913, + 577 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "T1-T3 are everyday tasks users perform on a smartphone. They cover smartphone touch interaction of taps, swipes, and long press, on UI widgets such as keyboards, buttons and scrolling content. Users were asked to experience performing these tasks under six conditions, including under impairments (both using the physical hardware and the VR device). At the end of all three tasks (T1-T3), users completed the NASA-TLX[18] survey to indicate their perceived workload during task performance. T4, on the other hand, is a controlled pointing task experiment. Participants were given a stimulus number and then asked to click on the button with the corresponding number, as quickly and as precisely as they could. (See Figure 12 for a screenshot of the application used in this task.) Users repeated this task 80 times in succession, for each of the six conditions (A-F; see Table 2). We recorded the touch times and positions with the task app. We conducted a short semi-structured interview at the end of the study to understand users' experiences with, and perceptions of, the physical and virtual impairment simulations.", + "bbox": [ + 511, + 577, + 913, + 811 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Instruments: We compared Empath-D with a commercial physical impairment simulator [13]. To calibrate for visual acuity, we adapted a test similar to a Snellen eye test chart [42] - showing rows of letters with each lower row having a smaller font size. We first used the physical impairment simulator to obtain the minimum acceptable font size. Using the same test page in the VR, we applied", + "bbox": [ + 511, + 811, + 913, + 896 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "Empath-D: VR-based Empathetic App Design for Accessibility", + "bbox": [ + 84, + 71, + 460, + 87 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "MobiSys '18, June 10-15, 2018, Munich, Germany", + "bbox": [ + 611, + 71, + 913, + 87 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "11", + "bbox": [ + 267, + 119, + 297, + 140 + ], + "page_idx": 9 + }, + { + "type": "table", + "img_path": "images/8638bc94a87df3220c56eaf20424b1d9b0ac79c22a95c4d941e476120acb7b7a.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
271613
251815
232026
121417
211119
221024
", + "bbox": [ + 205, + 176, + 359, + 319 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Figure 12: Screenshot of a test application for the pointing task", + "bbox": [ + 81, + 334, + 482, + 361 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "the impairment and gradually adjusted the severity until we hit the minimum acceptable font size. To calibrate the inner circle of clarity for glaucoma, we implemented an app that allows us to adjust the diameter of a coloured circle. We then used the physical impairment simulator for glaucoma, and adjusted the coloured circle to the point in which the circle reaches the fringe for clarity. We then calibrated the virtual glaucoma simulation in a similar manner. Three independent measurements for visual acuity and circle of clarity were taken from the research team and averaged to determine the final calibration parameters of font size $= 12$ sp and diameter $= 60$ mm.", + "bbox": [ + 81, + 401, + 482, + 553 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "7.3 Empath-D vs. Physical Smartphone", + "text_level": 1, + "bbox": [ + 83, + 570, + 413, + 585 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "We first investigate whether the VR-based interaction is a sufficiently faithful replica of the real-world interaction that a user would have with a regular smartphone, in the absence of any impairments.", + "bbox": [ + 81, + 588, + 482, + 643 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Touch Accuracy: In all six conditions, users were able to achieve high levels of button touch accuracy (see Table 4), with the accuracy being $98.8\\%$ ( $SD = 1.67$ ) when the users interacted unimpaired with the VR device. Comparing the accuracies between the physical smartphone and the VR device, we noted that the VR condition had an accuracy of $99.12\\%$ ( $SD = 1.32$ ) (across all 6 conditions), whereas the use of the physical smartphone provided $100\\%$ accuracy. In terms of the location accuracy, we noted a difference of $2.28 \\, \\text{mm}$ ( $SD = 2.98$ ) between the use of Empath-D vs. a physical smartphone. This difference is well within the uncertainty associated with finger touch interactions, and thus demonstrates that user performance was equivalent across both Empath-D and a physical smartphone.", + "bbox": [ + 81, + 643, + 482, + 810 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Perceived Workload: NASA-TLX scores indicated that the users did perceive significant differences in their workload using Empath-D, compared to use of the physical smartphone ( $Z = 2.824$ , $p = 0.005 < 0.05$ ). This does suggest that the navigating an app within the VR device does require greater cognitive effort than simply interacting with a regular smartphone. However, it is difficult to", + "bbox": [ + 81, + 810, + 482, + 893 + ], + "page_idx": 9 + }, + { + "type": "table", + "img_path": "images/2f232bd4a97c3fdf7f35dadee020a4b313e917af6e86f761c541d5c89d75b5f5.jpg", + "table_caption": [ + "Table 4: Accuracy of Button Touch Across All Users" + ], + "table_footnote": [], + "table_body": "
ImpairmentEnvironmentAccuracy (SD) %
NonePhysical100
Virtual98.79 (1.67)
CataractsPhysical100
Virtual99.09 (1.36)
GlaucomaPhysical100
Virtual99.49 (0.82)
", + "bbox": [ + 550, + 132, + 879, + 234 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "decipher whether this difference is due to Empath-D-specific issues, or a general lack of familiarity with VR devices.", + "bbox": [ + 511, + 268, + 913, + 296 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "We additionally investigated the subjective feedback captured by the semi-structured interview. $83\\%$ (10) of the users reported perceiving increased latency while using Empath-D, while 2 users indicated that they felt no noticeable latency difference. However, all 12 users indicated that the performance of Empath-D was \"acceptable\", and they would be able to use the Empath-D system for testing the usability of apps, as long as the apps do not require extremely low-latency interactions. (3 users indicated that the system might not be usable for testing real-time games.)", + "bbox": [ + 511, + 297, + 913, + 435 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "7.4 Empath-D vs. Hardware Impairment Simulators", + "text_level": 1, + "bbox": [ + 513, + 450, + 856, + 481 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "We now study the performance of Empath-D vis-a-vis impairments generated using commercially available hardware. Figure 11 shows the overhead of Empath-D under impairment conditions, demonstrating that Empath-D is able to operate without significant performance loss even in the presence of impairments.", + "bbox": [ + 511, + 484, + 913, + 555 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Touch Accuracy: Table 4 enumerates the accuracy for the pointing task (T4) for two distinct impairments (Cataract & Glaucoma), for both the VR-based Empath-D system and the hardware impairment simulator. We see that, in the Cataract condition, Empath-D had a mean accuracy of $99.09\\%$ , which is virtually indistinguishable from that of the hardware device ( $100\\%$ ). A similar pattern was observed for the Glaucoma impairment ( $99.49\\%$ for Empath-D vs. $100\\%$ for Hardware). In terms of the location accuracy, we noted a difference of $1.7 \\, \\text{mm}$ ( $SD = 1.9$ ) (for Cataract) and $1.2 \\, \\text{mm}$ ( $SD = 1.6$ ) (for Glaucoma) between the use of Empath-D vs. the impairment hardware. Once again, this difference is well within the uncertainty associated with finger touch interactions. These results provide strong evidence that Empath-D is able to emulate impairment conditions that are equivalent to that of dedicated, commercial hardware.", + "bbox": [ + 511, + 555, + 913, + 762 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Perceived Workload: The numerical TLX scores indicated that there was no significant difference for Cataracts; however, the difference for Glaucoma was significant $(Z = 3.061$ , $p = 0.002 < 0.05)$ with users indicating a higher perceived workload for the VR device.", + "bbox": [ + 511, + 762, + 913, + 818 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "7.5 Motion sickness", + "text_level": 1, + "bbox": [ + 513, + 834, + 691, + 847 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "At the end of the user study, we asked each participant if they felt discomfort or unwell. Only two of the twelve participants reported slight motion sickness while using Empath-D. 
Motion sickness may", + "bbox": [ + 511, + 852, + 913, + 893 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "MobiSys '18, June 10-15, 2018, Munich, Germany", + "bbox": [ + 83, + 71, + 385, + 87 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Wonjung Kim et al.", + "bbox": [ + 790, + 71, + 913, + 85 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "arise from: (1) the use of the VR display itself, and (2) the latency from Empath-D. However, it is difficult to separate the two.", + "bbox": [ + 83, + 107, + 480, + 135 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "The effects of motion sickness are notably minor in our current prototype of Empath-D. The nature of our experimentation intensifies the use of the VR display, whereas practical use of Empath-D is likely to be more interspersed between app redesigns. We further discuss how we may improve on latency in Section 9.2 to reduce motion sickness that may result from the latency of Empath-D.", + "bbox": [ + 81, + 135, + 480, + 218 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "8 RELATED WORK", + "text_level": 1, + "bbox": [ + 83, + 246, + 256, + 258 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Designing for Inclusiveness. Newell et al. [31] pointed out that traditional user-centred design techniques provide little guidance for designing interfaces for elderly and disabled users due to the large variation amongst the type and degree of impairments. They also highlighted that the standard guidelines for designing disabled-friendly UIs are too general [30] and lacked empathy for users. For instance, WCAG 2.0 specifies that colour \"is not used as the only visual means of conveying information, indicating an action, prompting a response or distinguishing a visual element\". The designer must still interpret such guidelines into specific designs in their application. Over the years, various accessibility design guidelines (such as WCAG 2.0 [3], IBM Accessibility Checklist [38], US Section 508 Standards [2]) and tools (aChecker [15]) have been proposed and refined. However, the problems pointed out by Newell remain unsolved to a large extent, which hinders elaborate design for a target user group with a specific impairment.", + "bbox": [ + 81, + 263, + 482, + 484 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Simulated Design. There exists prior work on helping UI designers design better interfaces for people suffering from vision impairments. Higuchi et al. [19] proposed a tool to simulate the visual capabilities of the elderly for the design of control panels, while Mankoff et al. [26] developed a tool to simulate a user with visual and motor impairments on the desktop screen. SIMVIZ [9, 47] uses the Oculus Rift VR device to simulate visual impairments to examine reading text on a smartphone. For audio modalities, Werfel et al. [47] simulated hearing ailments by using a pair of microphones with equalised headphones.", + "bbox": [ + 81, + 486, + 482, + 625 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Different from prior work, Empath-D uses VR as the medium for immersive evaluation to 1) flexibly support wider groups of impaired users, and 2) allow naturalistic interactions with a mobile phone in a virtual environment. 
This novel approach supports ecological validity in testing applications and is key for mobile apps, whose usage goes beyond the static settings of previous work.", + "bbox": [ + 81, + 625, + 482, + 707 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "While previous work has focused on simulation in a single modality (visual or auditory), Empath-D is able to flexibly combine modalities to support any application type, ailment (visual, auditory, motor) and usage environment.", + "bbox": [ + 81, + 707, + 482, + 762 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "System Support for Accessibility. Modern mobile OSes provide accessibility support; in particular, they allow users with far-sightedness to increase font sizes and users with blindness to interact through vocal interfaces. Also, Zhong et al. enhanced Android accessibility for users with hand tremor by reducing fine pointing and steady tapping [48]. We believe Empath-D will significantly expand the basic accessibility support of commodity devices and accelerate the design and deployment of various accessibility add-ons for different impaired users.", + "bbox": [ + 81, + 762, + 482, + 887 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Testing of Mobile Applications. Recently, there have been many systems, such as VanarSena [37], AMC [22], Puma [17], DynoDroid [25], DECAF [24], AppsPlayground [36], for automatically testing and identifying various types of UI and systems bugs in mobile applications. Empath-D takes a different approach in that we do not detect bugs after the application is developed and deployed. Instead, we allow the designer to test early iterations of the designs rapidly. In this way, we hope to reduce the pain of having to make significant UI changes at the end of the design cycle - or worse, end with an application that cannot be used effectively by the target impaired demographic.", + "bbox": [ + 511, + 107, + 913, + 258 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "9 DISCUSSION", + "text_level": 1, + "bbox": [ + 513, + 276, + 651, + 290 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Our current studies indicate the considerable promise of Empath-D, as a mechanism for rapid and empathetic evaluation of app usability. We now discuss some additional studies and issues that we intend to explore further.", + "bbox": [ + 511, + 294, + 913, + 349 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "9.1 User study with Designers", + "text_level": 1, + "bbox": [ + 513, + 368, + 769, + 383 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "We conducted a short user study with two mobile app developers to qualitatively examine Empath-D in actual use. Both developers had previously worked to create an Android mobile application, which was used as the baseline for the study. The developers were tasked with redesigning the mobile app for the glaucoma-impaired under two conditions: 1) without Empath-D, but with materials describing glaucoma and showing functionally accurate examples of glaucoma, and 2) with the same materials, and Empath-D. Both developers agreed that Empath-D helped them improve their designs over the baseline condition. 
The developers reported that Empath-D allowed them to improve their designs in two ways: 1) they could focus their attention on re-designing particular problematic parts of the UI, and 2) they were able to appropriately calibrate their modifications (for instance, increasing the font size may help, but text that is too large will also cause glaucoma sufferers to visually scan more, causing fatigue).", + "bbox": [ + 511, + 386, + 913, + 607 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "9.2 Dealing with Latency Issues", + "text_level": 1, + "bbox": [ + 513, + 625, + 785, + 641 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Our experimental studies indicate that users are able to utilise Empath-D effectively for \"conventional\" apps—i.e., those that typically involve sporadic interaction by users with UI elements, such as buttons and keyboards. The current end-to-end latency (of $\approx$ 200 msec) is not an impediment for high-fidelity evaluation of such apps. However, the participants also indicated that this latency (lag between user actions and rendering in the VR display) would pose a problem for highly latency-sensitive applications, such as games. At present, it is thus appropriate to state that Empath-D potentially needs additional optimisations to support such applications. The most obvious improvement would be to replace the default Android emulator with a faster, custom emulation engine—this is likely to reduce $\approx$ 100 msec of the delay budget.", + "bbox": [ + 511, + 643, + 913, + 823 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "The current implementation streams JPEG images (hand, emulator's screen) from the intermediary computer to the VR smartphone. We plan to adopt a low-latency video streaming codec such as H.265 HEVC [43], which would help reduce networking and rendering latency. OS-level optimisations (e.g., preemptive priority", + "bbox": [ + 511, + 824, + 913, + 893 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "Empath-D: VR-based Empathetic App Design for Accessibility", + "bbox": [ + 84, + 73, + 460, + 87 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "MobiSys '18, June 10-15, 2018, Munich, Germany", + "bbox": [ + 611, + 73, + 911, + 87 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "for inter-component messages) may be needed to support even lower latency.", + "bbox": [ + 81, + 107, + 480, + 133 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Recently, several works have proposed techniques for achieving a high-quality VR experience on mobile devices [7, 10, 21]. Empath-D could borrow some of these techniques to improve latency and video quality.", + "bbox": [ + 81, + 133, + 480, + 191 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "9.3 User Performance with VR Devices", + "text_level": 1, + "bbox": [ + 83, + 203, + 413, + 217 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Our user studies also indicated that the time for performing tasks (T1-T4) was marginally higher when using the VR environment, compared to the direct use of a real-world smartphone. More specifically, for the pointing task T4, there was an average difference of 654 msec in the task completion time using Empath-D, compared to the smartphone. In addition, anecdotal comments suggest that continued use of the VR device, for longer-lived sessions, might pose additional usability challenges. 
For example, a couple of users indicated some minor muscle fatigue, most likely as a result of using a 'heavy' VR device. It is an open question whether these issues will be mitigated over time, as VR devices become lighter and more ergonomic, and as users have greater familiarity with the use of VR devices.", + "bbox": [ + 81, + 220, + 482, + 400 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "9.4 Advanced Uses of Empath-D", + "text_level": 1, + "bbox": [ + 83, + 415, + 359, + 430 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Our current implementation of Empath-D supports the virtualisation of certain output modalities (specifically the display and audio) of the emulated app. The vision of Empath-D can be extended to create other richer interaction modes, often blending virtual and augmented reality (AR) settings. As an example, certain emulation conditions may need to generate and integrate synthetic sensor traces, to replace the real sensor traces from the smartphone—e.g., to mimic the user's movement in locations, such as forests and mountains, the phone's real GPS trace would need to be replaced by a synthetic GPS trace as in [27, 28]. Similarly, in some cases, the app itself might need to take inputs from the VR world—e.g., if the app was being used to magnify certain objects embedded in the VR world. While such use cases can be supported, they will require enhancements to the current Empath-D framework, and it is likely that the implementation may surface additional challenges, in terms of computational overhead and latency.", + "bbox": [ + 81, + 433, + 482, + 655 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "9.5 Developing Impairment Filters and Profiles", + "text_level": 1, + "bbox": [ + 83, + 667, + 478, + 684 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "To demonstrate the viability of Empath-D, we focused on the ability to simulate visual impairments, in particular cataracts and glaucoma. As we explored, these impairments have functional aspects that are commonly employed to characterise them, such as visual acuity or contrast sensitivity, and are often accompanied by standard tests such as the Snellen eye test chart [42] and Pelli-Robson contrast sensitivity chart [34] respectively. From examining the commercial physical impairment simulator and our experimentation, we believe that Empath-D has the ability to functionally simulate other impairments.", + "bbox": [ + 81, + 686, + 482, + 825 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "We recognise two important directions that Empath-D needs to address to improve impairment simulation and use. First, impairment filters have to be developed in concert with medical professionals who are subject matter experts in the areas of the specific pathologies. This helps to develop a library of impairment filters. Second,", + "bbox": [ + 81, + 825, + 482, + 896 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "with verified impairment filters, we may create impairment profiles, which characterise groups of users with possibly overlapping requirements. For instance, a hypothetical impairment profile may calibrate for a demographic of a range of ages, sex, and percentage of the population who may have myopia and cataracts—both of which affect visual acuity. 
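As an illustration of what such a profile might look like, a sketch follows (Python; the schema and every field name and value in it are purely our assumptions, not an implemented Empath-D feature):

```python
# Hypothetical impairment profile; all fields and values are illustrative
# assumptions, not part of the current Empath-D prototype.
ELDERLY_LOW_VISION = {
    "name": "elderly urban users, 65-80",
    "demographic": {"age_range": (65, 80), "sex": "any"},
    # Assumed fraction of the target demographic with each condition.
    "prevalence": {"myopia": 0.35, "cataracts": 0.20},
    # Verified impairment filters (Section 9.5) this profile composes.
    "filters": [
        {"type": "cataracts", "severity": 0.6},  # dimming and blur
        {"type": "myopia", "diopters": -2.5},    # distance blur
    ],
}
```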
With impairment profiles, app developers may easily select and understand the demographic for which they are designing.", + "bbox": [ + 511, + 107, + 913, + 218 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "10 CONCLUSION", + "text_level": 1, + "bbox": [ + 514, + 229, + 671, + 243 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "We presented the design and evaluation of Empath-D, a framework that allows app developers to 'step into the shoes' of impaired users, and perform an empathetic evaluation of their app interfaces. Our key idea is to utilise a virtual world (using a commodity VR device) to present an impaired view of the app's interface, while allowing the user to interact naturally with a real commodity smartphone in the physical world. Overcoming the current computational limitations (of the VR device and the Android emulator) required us to make careful system choices, such as (i) appropriate tradeoffs between the resolution and frame rate for rendering the virtual smartphone, (ii) subsampling of the mesh representing the user's hand, and (iii) scaling up the size of the virtual smartphone to overcome the lower resolution of the VR device. User studies show that Empath-D is effective in (a) providing usability that is equivalent to using a real app (on a real smartphone), for applications that do not require ultra-low latency, and (b) emulating impairments in a similar fashion to custom hardware devices. We believe that Empath-D can be a powerful new paradigm for effective bidirectional integration between real-world user actions and virtual worlds, and that this can enable additional immersive applications beyond just 'impairment emulation'.", + "bbox": [ + 511, + 248, + 913, + 539 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "11 ACKNOWLEDGEMENTS", + "text_level": 1, + "bbox": [ + 514, + 551, + 751, + 566 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "We are thankful to our shepherd Prof. Xia Zhou and all anonymous reviewers for their valuable reviews. This research is supported partially by Singapore Ministry of Education Academic Research Fund Tier 2 under research grant MOE2014-T2-1063, and by the National Research Foundation, Prime Minister's Office, Singapore under its IDM Futures Funding Initiative. All findings and recommendations are those of the authors and do not necessarily reflect the views of the granting agency, or SMU.", + "bbox": [ + 511, + 571, + 913, + 681 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "REFERENCES", + "text_level": 1, + "bbox": [ + 516, + 695, + 633, + 708 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] [n. d.]. AGNES (Age Gain Now Empathy System). ([n. d.]). Retrieved 2018-04-13 from http://agelab.mit.edu/agnes-age-gain-now-empathy-system", + "[2] [n. d.]. US Section 508 Standards. ([n. d.]). Retrieved 2018-04-13 from https://www.section508.gov/", + "[3] 2008. Web Content Accessibility Guidelines (WCAG) 2.0. (11 December 2008). Retrieved 2018-04-13 from https://www.w3.org/TR/WCAG20/", + "[4] 2016. Samsung Galaxy S7 Specifications. (2016). Retrieved 2018-04-13 from http://www.samsung.com/global/galaxy/galaxy-s7/#!/spec", + "[5] 2017. Samsung Gear VR Specifications. (2017). Retrieved 2018-04-13 from http://www.samsung.com/global/galaxy/gear-vr/specs/", + "[6] 2018. SolvePnP, Camera Calibration and 3D Reconstruction, OpenCV. (2018). 
Retrieved 2018-04-13 from https://docs.opencv.org/2.4/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html", + "[7] Omid Abari, Dinesh Bharadia, Austin Duffield, and Dina Katabi. 2017. Enabling High-Quality Untethered Virtual Reality. In 14th USENIX Symposium on Networked Systems Design and Implementation (NSDI 17). USENIX Association, Boston, MA, 531-544. https://www.usenix.org/conference/nsdi17/technical-sessions/presentation/abari" + ], + "bbox": [ + 521, + 712, + 913, + 893 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "MobiSys '18, June 10-15, 2018, Munich, Germany", + "bbox": [ + 83, + 71, + 387, + 87 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "Wonjung Kim et al.", + "bbox": [ + 792, + 71, + 913, + 85 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[8] Ardalan Amiri Sani, Kevin Boos, Min Hong Yun, and Lin Zhong. 2014. Rio: A System Solution for Sharing I/O Between Mobile Systems. In Proceedings of the 12th Annual International Conference on Mobile Systems, Applications, and Services (MobiSys '14). ACM, New York, NY, USA, 259-272. https://doi.org/10.1145/2594368.2594370", + "[9] Halim Cagri Ates, Alexander Fiannaca, and Eelke Folmer. 2015. Immersive Simulation of Visual Impairments Using a Wearable See-through Display. In Proceedings of the Ninth International Conference on Tangible, Embedded, and Embodied Interaction (TEI '15). ACM, New York, NY, USA, 225-228. https://doi.org/10.1145/2677199.2680551", + "[10] Kevin Boos, David Chu, and Eduardo Cuervo. 2016. FlashBack: Immersive Virtual Reality on Mobile Devices via Rendering Memoization. In Proceedings of the 14th Annual International Conference on Mobile Systems, Applications, and Services (MobiSys '16). ACM, New York, NY, USA, 291-304. https://doi.org/10.1145/2906388.2906418", + "[11] Kenny Tsu Wei Choo, Rajesh Krishna Balan, Tan Kiat Wee, Jagmohan Chauhan, Archan Misra, and Youngki Lee. 2017. Empath-D: Empathetic Design for Accessibility. In Proceedings of the 18th International Workshop on Mobile Computing Systems and Applications (HotMobile '17). ACM, New York, NY, USA, 55-60. https://doi.org/10.1145/3032970.3032981", + "[12] Eduardo Cuervo, Alec Wolman, Landon P. Cox, Kiron Lebeck, Ali Razeen, Stefan Saroiu, and Madanlal Musuvathi. 2015. Kahawai: High-Quality Mobile Gaming Using GPU Offload. In Proceedings of the 13th Annual International Conference on Mobile Systems, Applications, and Services (MobiSys '15). ACM, New York, NY, USA, 121-135. https://doi.org/10.1145/2742647.2742657", + "[13] Marshall Flax. 2018. Low Vision Simulators. (2018). Retrieved 2018-04-13 from https://www.lowvisionsimulators.com/", + "[14] S. Garrido-Jurado, R. Muñoz-Salinas, F.J. Madrid-Cuevas, and M.J. Marín-Jiménez. 2014. Automatic generation and detection of highly reliable fiducial markers under occlusion. Pattern Recognition 47, 6 (2014), 2280-2292. https://doi.org/10.1016/j.patcog.2014.01.005", + "[15] Greg Gay and Cindy Qi Li. 2010. AChecker: Open, Interactive, Customizable, Web Accessibility Checking. In Proceedings of the 2010 International Cross Disciplinary Conference on Web Accessibility (W4A) (W4A '10). ACM, New York, NY, USA, Article 23, 2 pages. https://doi.org/10.1145/1805986.1806019", + "[16] Genymotion. [n. d.]. Genymotion Android Emulator. ([n. d.]). Retrieved 2018-04-13 from https://www.genymotion.com/", + "[17] Shuai Hao, Bin Liu, Suman Nath, William G.J. Halfond, and Ramesh Govindan. 2014. 
PUMA: Programmable UI-automation for Large-scale Dynamic Analysis of Mobile Apps. In Proceedings of the 12th Annual International Conference on Mobile Systems, Applications, and Services (MobiSys '14). ACM, New York, NY, USA, 204-217. https://doi.org/10.1145/2594368.2594390", + "[18] Sandra G Hart and Lowell E Staveland. 1988. Development of NASA-TLX (Task Load Index): Results of empirical and theoretical research. In Advances in psychology. Vol. 52. Elsevier, 139-183.", + "[19] Kazunori Higuchi, Yasuo Sakaguchi, Kazuhiko Sugiyama, and Tomoaki Nakano. 1999. Simulating the human vision of elderly for designing control panels. In Systems, Man, and Cybernetics, 1999. IEEE SMC'99 Conference Proceedings. 1999 IEEE International Conference on, Vol. 5. IEEE, 703-708.", + "[20] Intel. 2016. Intel® RealSense™ Camera SR300 Product Specifications. (2016). Retrieved 2018-04-13 from https://ark.intel.com/products/92329/Intel-RealSense-Camera-SR300", + "[21] Zeqi Lai, Y. Charlie Hu, Yong Cui, Linhui Sun, and Ningwei Dai. 2017. Furion: Engineering High-Quality Immersive Virtual Reality on Today's Mobile Devices. In Proceedings of the 23rd Annual International Conference on Mobile Computing and Networking (MobiCom '17). ACM, New York, NY, USA, 409-421. https://doi.org/10.1145/3117811.3117815", + "[22] Kyungmin Lee, Jason Flinn, T.J. Giuli, Brian Noble, and Christopher Peplin. 2013. AMC: Verifying User Interface Properties for Vehicular Applications. In Proceedings of the 11th Annual International Conference on Mobile Systems, Applications, and Services (MobiSys '13). ACM, New York, NY, USA, 1-12. https://doi.org/10.1145/2462456.2464459", + "[23] Gordon E Legge, Sing-Hang Cheung, Deyue Yu, Susana TL Chung, Hye-Won Lee, and Daniel P Owens. 2007. The case for the visual span as a sensory bottleneck in reading. Journal of Vision 7, 2 (2007), 9-9.", + "[24] Bin Liu, Suman Nath, Ramesh Govindan, and Jie Liu. 2014. DECAF: Detecting and Characterizing Ad Fraud in Mobile Apps. In 11th USENIX Symposium on Networked Systems Design and Implementation (NSDI 14). USENIX Association, Seattle, WA, 57-70. https://www.usenix.org/conference/nsdi14/technical-sessions/presentation/liu_bin", + "[25] Aravind Machiry, Rohan Tahiliani, and Mayur Naik. 2013. Dynodroid: An Input Generation System for Android Apps. In Proceedings of the 2013 9th Joint Meeting on Foundations of Software Engineering (ESEC/FSE 2013). ACM, New York, NY, USA, 224-234. https://doi.org/10.1145/2491411.2491450", + "[26] Jennifer Mankoff, Holly Fait, and Ray Juang. 2005. Evaluating accessibility by simulating the experiences of users with vision or motor impairments. IBM Systems Journal 44, 3 (2005), 505-517." + ], + "bbox": [ + 86, + 108, + 483, + 886 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[27] Chulhong Min, Seungchul Lee, Changhun Lee, Youngki Lee, Seungwoo Kang, Seungpyo Choi, Wonjung Kim, and Junehwa Song. 2016. PADA: Power-aware Development Assistant for Mobile Sensing Applications. In Proceedings of the 2016 ACM International Joint Conference on Pervasive and Ubiquitous Computing (UbiComp '16). ACM, New York, NY, USA, 946-957. https://doi.org/10.1145/2971648.2971676", + "[28] Chulhong Min, Youngki Lee, Chungkuk Yoo, Seungwoo Kang, Sangwon Choi, Pillsoon Park, Inseok Hwang, Younghyun Ju, Seungpyo Choi, and Junehwa Song. 2015. PowerForecaster: Predicting Smartphone Power Impact of Continuous Sensing Applications at Pre-installation Time. 
In Proceedings of the 13th ACM Conference on Embedded Networked Sensor Systems (SenSys '15). ACM, New York, NY, USA, 31-44. https://doi.org/10.1145/2809695.2809728", + "[29] Produkt + Projekt Wolfgang Moll. [n. d.]. Age simulation suit GERT - the GERontic Test suit. ([n. d.]). Retrieved 2018-04-13 from http://www.age-simulation-suit.com/", + "[30] Alan Newell and Peter Gregor. 1988. Human computer interaction for people with disabilities. (1988).", + "[31] Alan F Newell, Peter Gregor, Maggie Morgan, Graham Pullin, and Catriona Macaulay. 2011. User-sensitive inclusive design. Universal Access in the Information Society 10, 3 (2011), 235-243.", + "[32] Nvidia. 2016. GeForce GTX 1080 Specifications. (2016). Retrieved 2018-04-13 from https://www.geforce.com/hardware/Desktop-gpus/geforce-gtx-1080/ specifications", + "[33] National Institute on Aging. 2016. World's older population grows dramatically. (28 March 2016). Retrieved 2018-04-13 from https://www.nih.gov/news-events/news-releases/worlds-older-population-grows-dramatically", + "[34] DG Pelli, JG Robson, et al. 1988. The design of a new letter chart for measuring contrast sensitivity. In Clinical Vision Sciences. CiteSeer.", + "[35] Android Open Source Project. 2017. SurfaceFlinger and HardwareComposer. (March 2017). Retrieved 2018-04-13 from https://source.android.com/devices/ graphics/arch-sf-hwc", + "[36] Vaibhav Rastogi, Yan Chen, and William Enck. 2013. AppsPlayground: Automatic Security Analysis of Smartphone Applications. In Proceedings of the Third ACM Conference on Data and Application Security and Privacy (CODASPY '13). ACM, New York, NY, USA, 209-220. https://doi.org/10.1145/2435349.2435379", + "[37] Lenin Ravindranath, Suman Nath, Jitendra Padhye, and Hari Balakrishnan. 2014. Automatic and Scalable Fault Detection for Mobile Applications. In Proceedings of the 12th Annual International Conference on Mobile Systems, Applications, and Services (MobiSys '14). ACM, New York, NY, USA, 190-203. https://doi.org/10.1145/2594368.2594377", + "[38] IBM Accessibility Research. 2017. IBM Accessibility Checklist 7.0. (18 July 2017). Retrieved 2018-04-13 from http://www-03.ibm.com/able/guidelines/ci162/accessibility_checklist.html", + "[39] Justin B. Rousek, Sonja Koneczny, and M. Susan Hallbeck. 2009. Simulating Visual Impairment to Detect Hospital Wayfinding Difficulties. Proceedings of the Human Factors and Ergonomics Society Annual Meeting 53, 8 (Oct. 2009), 531-535.", + "[40] Samsung. 2014. Samsung Galaxy S5 Specifications. (2014). Retrieved 2018-04-13 from http://www.samsung.com/uk/smartphones/galaxy-s5-g900f/SM-G900FZKABTU/", + "[41] Alvy Ray Smith and James F. Blinn. 1996. Blue Screen Matting. In Proceedings of the 23rd Annual Conference on Computer Graphics and Interactive Techniques (SIGGRAPH '96). ACM, New York, NY, USA, 259-268. https://doi.org/10.1145/237170.237263", + "[42] Herman Snellen. 1873. Probebuchstaben zur bestimmung der sehscharfe. Vol. 1. H. Peters.", + "[43] G. J. Sullivan, J. R. Ohm, W. J. Han, and T. Wiegand. 2012. Overview of the High Efficiency Video Coding (HEVC) Standard. IEEE Transactions on Circuits and Systems for Video Technology 22, 12 (Dec 2012), 1649-1668. https://doi.org/10.1109/TCSVT.2012.2221191", + "[44] Unity Technologies. [n. d.]. Unity. ([n. d.]). Retrieved 2018-04-13 from https://unity3d.com/", + "[45] Sam Tregillus and Eelke Folmer. 2016. VR-STEP: Walking-in-Place Using Inertial Sensing for Hands Free Navigation in Mobile VR Environments. 
In Proceedings of the 2016 CHI Conference on Human Factors in Computing Systems (CHI '16). ACM, New York, NY, USA, 1250-1255. https://doi.org/10.1145/2858036.2858084", + "[46] Zhou Wang, Alan C Bovik, Hamid R Sheikh, and Eero P Simoncelli. 2004. Image quality assessment: from error visibility to structural similarity. IEEE transactions on image processing 13, 4 (2004), 600-612.", + "[47] Fabian Werfel, Roman Wiche, Jochen Feitsch, and Christian Geiger. 2016. Empathizing Audiovisual Sense Impairments: Interactive Real-Time Illustration of Diminished Sense Perception. In Proc. of AH.", + "[48] Yu Zhong, Astrid Weber, Casey Burkhardt, Phil Weaver, and Jeffrey P. Bigham. 2015. Enhancing Android Accessibility for Users with Hand Tremor by Reducing Fine Pointing and Steady Tapping. In Proceedings of the 12th Web for All Conference (W4A '15). ACM, New York, NY, USA, Article 29, 10 pages. https://doi.org/10.1145/2745555.2747277", + "[49] George J. Zimmerman. 1979. Zimmerman Low Vision Simulation Kit. (1979). Retrieved 2018-04-13 from http://www.lowvisionsimulationkit.com/" + ], + "bbox": [ + 516, + 108, + 913, + 895 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "Empath-D: VR-based Empathetic App Design for Accessibility", + "bbox": [ + 84, + 73, + 460, + 87 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "MobiSys '18, June 10-15, 2018, Munich, Germany", + "bbox": [ + 611, + 73, + 911, + 87 + ], + "page_idx": 12 + } +] \ No newline at end of file diff --git a/data/2025/2503_12xxx/2503.12933/52570aa5-5dd9-4c45-a08c-c1948885d88d_model.json b/data/2025/2503_12xxx/2503.12933/52570aa5-5dd9-4c45-a08c-c1948885d88d_model.json new file mode 100644 index 0000000000000000000000000000000000000000..93dc20cbb3f59c478b564d8d7caaa05a84a52dbe --- /dev/null +++ b/data/2025/2503_12xxx/2503.12933/52570aa5-5dd9-4c45-a08c-c1948885d88d_model.json @@ -0,0 +1,3295 @@ +[ + [ + { + "type": "title", + "bbox": [ + 0.104, + 0.102, + 0.895, + 0.128 + ], + "angle": 0, + "content": "Empath-D: VR-based Empathetic App Design for Accessibility" + }, + { + "type": "text", + "bbox": [ + 0.147, + 0.14, + 0.315, + 0.187 + ], + "angle": 0, + "content": "Wonjung Kim\\* wjkim@nclab.kaist.ac.kr KAIST" + }, + { + "type": "text", + "bbox": [ + 0.382, + 0.14, + 0.619, + 0.189 + ], + "angle": 0, + "content": "Kenny Tsu Wei Choo kenny.choo.2012@smu.edu.sg Singapore Management University" + }, + { + "type": "text", + "bbox": [ + 0.65, + 0.14, + 0.886, + 0.189 + ], + "angle": 0, + "content": "Youngki Lee \nyoungkilee@smu.edu.sg \nSingapore Management University" + }, + { + "type": "text", + "bbox": [ + 0.307, + 0.2, + 0.422, + 0.215 + ], + "angle": 0, + "content": "Archan Misra" + }, + { + "type": "text", + "bbox": [ + 0.29, + 0.217, + 0.441, + 0.232 + ], + "angle": 0, + "content": "archanm@smu.edu.sg" + }, + { + "type": "text", + "bbox": [ + 0.247, + 0.233, + 0.482, + 0.247 + ], + "angle": 0, + "content": "Singapore Management University" + }, + { + "type": "text", + "bbox": [ + 0.545, + 0.2, + 0.721, + 0.217 + ], + "angle": 0, + "content": "Rajesh Krishna Balan" + }, + { + "type": "text", + "bbox": [ + 0.568, + 0.218, + 0.7, + 0.233 + ], + "angle": 0, + "content": "rajesh@smu.edu.sg" + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.233, + 0.75, + 0.247 + ], + "angle": 0, + "content": "Singapore Management University" + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.256, + 0.185, + 0.27 + ], + "angle": 0, + "content": "ABSTRACT" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.274, + 0.484, + 0.525 + ], + 
"angle": 0, + "content": "With app-based interaction increasingly permeating all aspects of daily living, it is essential to ensure that apps are designed to be inclusive and are usable by a wider audience such as the elderly, with various impairments (e.g., visual, audio and motor). We propose Empath-D, a system that fosters empathetic design, by allowing app designers, in-situ, to rapidly evaluate the usability of their apps, from the perspective of impaired users. To provide a truly authentic experience, Empath-D carefully orchestrates the interaction between a smartphone and a VR device, allowing the user to experience simulated impairments in a virtual world while interacting naturally with the app, using a real smartphone. By carefully orchestrating the VR-smarphone interaction, Empath-D tackles challenges such as preserving low-latency app interaction, accurate visualization of hand movement and low-overhead perturbation of I/O streams. Experimental results show that user interaction with Empath-D is comparable (both in accuracy and user perception) to real-world app usage, and that it can simulate impairment effects as effectively as a custom hardware simulator." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.536, + 0.221, + 0.55 + ], + "angle": 0, + "content": "CCS CONCEPTS" + }, + { + "type": "text", + "bbox": [ + 0.083, + 0.554, + 0.485, + 0.625 + ], + "angle": 0, + "content": "- Human-centered computing \\(\\rightarrow\\) Systems and tools for interaction design; Ubiquitous and mobile computing systems and tools; Accessibility design and evaluation methods; Accessibility systems and tools; Ubiquitous and mobile computing design and evaluation methods;" + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.636, + 0.192, + 0.65 + ], + "angle": 0, + "content": "KEYWORDS" + }, + { + "type": "text", + "bbox": [ + 0.083, + 0.655, + 0.484, + 0.682 + ], + "angle": 0, + "content": "empathetic design; accessibility; mobile design; virtual reality; multi-device, distributed user interfaces" + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.688, + 0.231, + 0.699 + ], + "angle": 0, + "content": "ACM Reference Format:" + }, + { + "type": "text", + "bbox": [ + 0.083, + 0.7, + 0.483, + 0.739 + ], + "angle": 0, + "content": "Wonjung Kim, Kenny Tsu Wei Choo, Youngki Lee, Archan Misra, and Rajesh Krishna Balan. 2018. Empath-D: VR-based Empathetic App Design for Accessibility. In MobiSys '18: The 16th Annual International Conference on Mobile" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.748, + 0.483, + 0.771 + ], + "angle": 0, + "content": "*This work was done while the author was on an internship at Singapore Management University" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.78, + 0.483, + 0.853 + ], + "angle": 0, + "content": "Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for components of this work owned by others than ACM must be honored. Abstracting with credit is permitted. To copy otherwise, or republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee. Request permissions from permissions@acm.org." 
+ }, + { + "type": "text", + "bbox": [ + 0.084, + 0.853, + 0.312, + 0.864 + ], + "angle": 0, + "content": "MobiSys '18, June 10-15, 2018, Munich, Germany" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.865, + 0.304, + 0.875 + ], + "angle": 0, + "content": "\\(\\odot\\) 2018 Association for Computing Machinery." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.875, + 0.295, + 0.884 + ], + "angle": 0, + "content": "ACM ISBN 978-1-4503-5720-3/18/06...$15.00" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.884, + 0.273, + 0.896 + ], + "angle": 0, + "content": "https://doi.org/10.1145/3210240.3210331" + }, + { + "type": "image", + "bbox": [ + 0.522, + 0.265, + 0.905, + 0.442 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.605, + 0.46, + 0.825, + 0.474 + ], + "angle": 0, + "content": "Figure 1: Overview of Empath-D" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.502, + 0.916, + 0.528 + ], + "angle": 0, + "content": "Systems, Applications, and Services, June 10-15, 2018, Munich, Germany. ACM, New York, NY, USA, 13 pages. https://doi.org/10.1145/3210240.3210331" + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.545, + 0.688, + 0.558 + ], + "angle": 0, + "content": "1 INTRODUCTION" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.563, + 0.915, + 0.715 + ], + "angle": 0, + "content": "Digital interactions have become increasingly commonplace and immersive. We now constantly interact with our personal devices and computing-enhanced ambient objects (such as coffeemakers, home automation systems and digital directories), while engaging in everyday activities, such as commuting, shopping or exercising. Given the ubiquity of such interactions, it is important to ensure that the associated computing interfaces remain accessible to segments of the population, such as the elderly, who suffer from various impairments. The global elderly population is projected to reach \\(16.7\\%\\) by 2050 [33], and such users suffer disproportionately from impairments (e.g., vision) that hinder accessibility." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.716, + 0.916, + 0.896 + ], + "angle": 0, + "content": "To support more accessible design, our earlier work [11] introduced the vision of Empath-D, which uses a virtual reality (VR) device to provide mobile application/object designers with a realistic emulation of the interaction experience that impaired users would encounter. In this work, we present the design, implementation and validation of the Empath-D system inspired by this vision. Empath-D's goal is to allow unimpaired application designers to step into the shoes of impaired users and rapidly evaluate the usability of alternative prototypes. While we shall principally focus on empathetic evaluation of mobile applications (apps), Empath-D's design is generic enough to permit emulation of other real-world interactions-e.g., how an elderly user with cataracts and hearing loss would experience a traffic-light controlled pedestrian intersection." + }, + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.26, + 0.058, + 0.707 + ], + "angle": 270, + "content": "arXiv:2503.12933v1 [cs.HC] 17 Mar 2025" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.084, + 0.073, + 0.388, + 0.088 + ], + "angle": 0, + "content": "MobiSys '18, June 10-15, 2018, Munich, Germany" + }, + { + "type": "header", + "bbox": [ + 0.792, + 0.073, + 0.914, + 0.087 + ], + "angle": 0, + "content": "Wonjung Kim et al." 
+ }, + { + "type": "text", + "bbox": [ + 0.082, + 0.107, + 0.483, + 0.314 + ], + "angle": 0, + "content": "Empath-D's \\(^1\\) key idea is to present the user with an impairment-augmented view of the smartphone interface (or other digital objects) in a virtual world, while allowing the non-impaired user to perform natural interactions, using a physical smartphone, with a real-world instance of the smartphone app. At a high-level, Empath-D works as follows (see Figure 1): The (unimpaired) user uses a physical smartphone to perform real-world interactions (such as scrolls, taps or gestures) with the app, while wearing a VR device. The results of such interactions are projected instantaneously through the I/O interfaces (e.g., screen, speaker) of a 'virtual smartphone' visible in the VR display, but only after those I/O streams have been appropriately degraded by the specified impairment. For example, in Figure 1, the virtual phone's display (and the world view) has been appropriately vignetted, to mimic the experience of a user suffering from glaucoma." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.315, + 0.483, + 0.342 + ], + "angle": 0, + "content": "Key Challenges: To mimic impairments with adequate fidelity and usability, Empath-D must support the following features:" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.344, + 0.483, + 0.455 + ], + "angle": 0, + "content": "- Fast, Accurate Multi-device Operation: Empath-D utilizes a split-interaction paradigm: a user interacts with an app using a real-world handheld smartphone, while perceiving (viewing, hearing) the app responses through the VR interface. To faithfully replicate the real-world experience, this split-mode interaction must have tight time coupling and visual fidelity (of the virtual phone's screen), comparable to direct interactions with a standalone smartphone." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.458, + 0.483, + 0.569 + ], + "angle": 0, + "content": "- Real-time Tracking: To preserve a user's perception of naturalistic interactions, Empath-D must not only capture explicit phone events, but also mirrors physical actions taken by the user (e.g., swinging the phone around or having one's hand hover over the phone). Thus, Empath-D must also track and render, in real-time, the orientation/location of both the phone and the user's hand within the VR device's field-of-view." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.574, + 0.483, + 0.643 + ], + "angle": 0, + "content": "- Lightweight Impairment Execution: To preserve the feel of natural interaction, Empath-D must insert the impairment-specific perturbations into the input/output streams with imperceptible latency or computational overhead (e.g., no reduction in video frame rate)." 
+ }, + { + "type": "list", + "bbox": [ + 0.11, + 0.344, + 0.483, + 0.643 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.645, + 0.483, + 0.657 + ], + "angle": 0, + "content": "Key Contributions: We make the following major contributions:" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.658, + 0.483, + 0.81 + ], + "angle": 0, + "content": "- 3-Tier Virtualisation Model: We design a novel 3-tier architecture where (i) the real-world smartphone serves merely as a tracker, forwarding user interaction events (e.g., screen touch and gestures) to a computationally powerful intermediary, after which (ii) the intermediary device perturbs those events by blending in specific input impairments (e.g., hand tremors) and passes them to an app instance running on a smartphone emulator, and finally (iii) the VR device receives the redirected outputs from this app instance and renders an appropriately-impaired (by blending in the output impairments) virtual world, including a virtual smartphone." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.814, + 0.483, + 0.87 + ], + "angle": 0, + "content": "- Real-time Hand and Phone Tracking: We use an RGB-Depth camera, mounted on the head-worn VR device, to track the outline of a user's hand, and subsequently perform a lightweight but realistic 3-D rendering of the hand on the VR" + }, + { + "type": "list", + "bbox": [ + 0.11, + 0.658, + 0.483, + 0.87 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.554, + 0.107, + 0.915, + 0.178 + ], + "angle": 0, + "content": "display. We also use fiducial marker tracking [14] by the camera to track the position/orientation of the real-world smartphone. We demonstrate our ability to achieve both high-fidelity (pointing error \\(\\leq 5\\,mm\\)) and low-latency (end-to-end delays below 120 msec) hand tracking and display." + }, + { + "type": "text", + "bbox": [ + 0.542, + 0.181, + 0.915, + 0.321 + ], + "angle": 0, + "content": "- Usability of Virtualized Phone, in Use Environments: We show that Empath-D is not just usable, but that user performance (absent impairments) using Empath-D's virtual smartphone is equivalent to real-world interaction with a smartphone. In addition, we allow usability testing of apps in their use environments, a key enabler for design of mobile applications which may be used anywhere. Our Samsung Gear VR-based prototype has end-to-end latency low enough (only 96.3 msec of latency, excluding the mobile app emulation) to permit faithful reproduction of direct smartphone usage." + }, + { + "type": "text", + "bbox": [ + 0.542, + 0.325, + 0.915, + 0.492 + ], + "angle": 0, + "content": "- Validation of Impairment Fidelity and Overall System: We implement two distinct vision (glaucoma & cataract) and one audio (high-frequency hearing loss) impairment in our Empath-D prototype. We then conduct a set of studies using the vision impairments, where 12 participants perform a series of standardised activities (e.g., add an alarm), using both our Empath-D prototype (test) and a commercial hardware vision impairment simulator (control) and establish that the performance of users is equivalent across the test and control groups. Finally, we conduct a small-scale study to provide preliminary evidence that our empathetic approach allows developers to design accessible mobile UIs faster and better."
+ }, + { + "type": "list", + "bbox": [ + 0.542, + 0.181, + 0.915, + 0.492 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.507, + 0.75, + 0.521 + ], + "angle": 0, + "content": "2 THE EMPATH-D VISION" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.525, + 0.838, + 0.539 + ], + "angle": 0, + "content": "We use an example to illustrate the use of Empath-D:" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.54, + 0.915, + 0.677 + ], + "angle": 0, + "content": "Designing for Visual Impairment. Alice is designing a mobile app that automatically magnifies text from real environments seen through its rear camera to aid people who suffer from cataracts (a condition that dims and blurs vision). Alice starts Empath-D and is presented with a web interface that allows her to customise impairments (e.g., specify the intensity of visual blur). After customising the environment, Alice clicks in the Empath-D web interface to (1) compile the environment to her phone used for VR display (VR-phone)\\(^2\\) and (2) connect an input/output service to a separate phone (IO-phone). She then plugs the VR-phone into the VR headset." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.678, + 0.915, + 0.844 + ], + "angle": 0, + "content": "Alice then compiles her Android app, and runs it in the Android emulator. She puts on the VR headset and holds the IO-phone in her hands. A virtual smartphone (Virt-phone) shows up in VR, tracking the real-world motion of the IO-phone. Alice now navigates through the virtual world, experiencing it as an \"impaired user, with cataracts\". She holds up the IO-phone on a street corner (in the real world), and notices that the magnified text (as seen in the virtual phone in the virtual world) is not clear enough to be legible to a cataract-impaired user. She can now iteratively and rapidly modify her app, recompile it, and execute it in the Android emulator, until she is satisfied with the output. This scenario demonstrates the ease of use of Empath-D, with no need for special instrumentation of the app." + }, + { + "type": "page_footnote", + "bbox": [ + 0.085, + 0.883, + 0.347, + 0.896 + ], + "angle": 0, + "content": "Video of Empath-D in action at https://is.gd/empath_d" + }, + { + "type": "page_footnote", + "bbox": [ + 0.514, + 0.871, + 0.915, + 0.893 + ], + "angle": 0, + "content": "2The VR-phone is needed only for VR devices that require a smartphone-e.g., Samsung Gear VR" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.073, + 0.462, + 0.088 + ], + "angle": 0, + "content": "Empath-D: VR-based Empathetic App Design for Accessibility" + }, + { + "type": "header", + "bbox": [ + 0.612, + 0.074, + 0.914, + 0.088 + ], + "angle": 0, + "content": "MobiSys '18, June 10-15, 2018, Munich, Germany" + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.106, + 0.286, + 0.12 + ], + "angle": 0, + "content": "3 SYSTEM OVERVIEW" + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.126, + 0.376, + 0.142 + ], + "angle": 0, + "content": "3.1 Design Goals and Implications" + }, + { + "type": "text", + "bbox": [ + 0.083, + 0.144, + 0.482, + 0.172 + ], + "angle": 0, + "content": "Empath-D has the following key goals, which directly influence the salient implementation choices."
+ }, + { + "type": "text", + "bbox": [ + 0.11, + 0.185, + 0.483, + 0.378 + ], + "angle": 0, + "content": "- Holistic emulation of impairments: For a truly empathetic experience, the app designer must perceive the effects of impairments not just while using the mobile app, but throughout her immersion in the virtual world. Consider a user, suffering from cataract, who is interacting with her smartphone while attending a dimly lit dinner gathering. Simply blurring the phone display, while leaving the background illumination and focus unchanged, might not replicate challenges in visual contrast that an impaired user would face in reality. This requirement precludes the straightforward use of I/O redirection techniques such as Rio [8], which can potentially perturb the I/O streams of only the mobile device. Instead, the impairment must be applied holistically, to the entire virtual world." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.379, + 0.483, + 0.614 + ], + "angle": 0, + "content": "- Realistic emulation of smartphone and mobile apps in the virtual world: Empath-D aims at realistically emulating mobile apps within the virtual world rendered by a commodity VR headset. Realistic emulation of mobile apps imposes two requirements. (a) First, the virtual smartphone should have sufficient visual resolution, corresponding to typical usage where the smartphone is held \\(\\approx 30\\mathrm{cm}\\) away from the eye. We shall see (in Section 6.3) that this requirement, coupled with differences in display resolutions between smartphones and VR devices, requires careful magnification of the virtual smartphone to provide legibility without hampering usage fidelity. (b) Second, the user should not perceive any lag between her user input and the rendered view of the app, seen through the VR device. Quantitatively, we thus require that the task completion time, experienced by a user interacting with the emulated application in the virtual world, should be comparable to real-world app usage on a real smartphone." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.615, + 0.483, + 0.711 + ], + "angle": 0, + "content": "- Use of unmodified app: For easy and low-overhead adoption by app designers, Empath-D should support the emulation of mobile applications using the original, unmodified binaries (e.g., .apk for Android). Empath-D's requirement to support empathetic emulation without app modifications implies that app designers would be able to adopt Empath-D with minimal impact to existing development practices." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.712, + 0.483, + 0.89 + ], + "angle": 0, + "content": "- Low-latency, accurate finger tracking: This goal is an extension of the holistic emulation objective. In the real-world, users utilise instantaneous visual feedback and proprioception to move their fingers around the smartphone display, even when they are hovering but not actually touching the display. To ensure consistency between the user's tactile, visual and proprioceptive perceptions of her hand movement, Empath-D should also realistically render, in the virtual world, the user's hand movements and any changes in the position/orientation of the real-world smartphone, without any perceptible lag. In Section 6, we shall see how the Empath-D implementation meets these stringent performance bounds."
+ }, + { + "type": "list", + "bbox": [ + 0.11, + 0.185, + 0.483, + 0.89 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.522, + 0.104, + 0.912, + 0.385 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.605, + 0.399, + 0.825, + 0.413 + ], + "angle": 0, + "content": "Figure 2: Empath-D architecture" + }, + { + "type": "text", + "bbox": [ + 0.542, + 0.435, + 0.932, + 0.517 + ], + "angle": 0, + "content": "- Light-weight, effective emulation of impairments: Empath-D will need to emulate impairments, at different levels of severity. For high-fidelity empathetic emulation, the insertion of such impairments in the I/O streams of the smartphone should not generate any additional artefacts (e.g., increased latency, reduction in display refresh rate, etc.)." + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.532, + 0.7, + 0.547 + ], + "angle": 0, + "content": "3.2 System Overview" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.55, + 0.914, + 0.578 + ], + "angle": 0, + "content": "We now present the overview of the Empath-D system (illustrated in Figure 2)." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.578, + 0.915, + 0.756 + ], + "angle": 0, + "content": "Using Empath-D in VR. To immersively evaluate the application, the developer (or the tester) starts by installing her developed application binaries (i.e., Android .apks) to run on the emulated smartphone. The developer then adjusts the profile settings for the impairment using Empath-D's web dashboard and selects a use case scenario (e.g., in office, in the street, etc.). She holds her physical smartphone and puts on the VR headset, earphones (when hearing impairments are involved), and experiences the immersive reality (where she can use the app - now mapped onto the physical smartphone - with the configured impairment under the designated use case scenario) that Empath-D generates. She then tests out various interfaces and functionalities of the app in the immersive VR environments." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.758, + 0.915, + 0.799 + ], + "angle": 0, + "content": "Components of Empath-D. Empath-D runs across three different physical devices: a physical smartphone, a computer, and a VR device (see Figure 2)." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.8, + 0.915, + 0.897 + ], + "angle": 0, + "content": "Smartphone: In Empath-D, the user interacts with the app using a real smartphone held in her hand. Interestingly, this smartphone does not run the app itself, but functions as a tracking device, helping to preserve the user's realistic sense of smartphone interaction. The smartphone simply redirects the user interaction events (e.g., touch events such as clicks and swipes on the display and motion events captured by inertial sensors) to the computer, which is in" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.074, + 0.386, + 0.088 + ], + "angle": 0, + "content": "MobiSys '18, June 10-15, 2018, Munich, Germany" + }, + { + "type": "header", + "bbox": [ + 0.793, + 0.074, + 0.912, + 0.087 + ], + "angle": 0, + "content": "Wonjung Kim et al." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.107, + 0.482, + 0.148 + ], + "angle": 0, + "content": "charge of the app emulation. This smartphone also displays a fiducial marker array [14] on its display, to help in efficient, real-time tracking of the phone's location."
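+ }, + { + "type": "text", + "bbox": [ + 0.084, + 0.15, + 0.482, + 0.19 + ], + "angle": 0, + "content": "The paper does not spell out the wire format of this event redirection. The minimal Python sketch below illustrates one plausible scheme: the tracking phone streams newline-delimited JSON events, and the intermediary replays them on the emulated smartphone via adb's input command. The port number, message fields and adb-based replay are illustrative assumptions, not Empath-D's actual protocol." + }, + { + "type": "algorithm", + "bbox": [ + 0.094, + 0.192, + 0.482, + 0.32 + ], + "angle": 0, + "content": "# Hypothetical sketch of the event redirection described above; the \n# JSON message fields and the adb-based replay are our own assumptions. \nimport json, socket, subprocess \n \ndef replay(event): \n    # Replay a forwarded touch event on the emulated smartphone via adb. \n    if event['kind'] == 'tap': \n        cmd = ['adb', 'shell', 'input', 'tap', str(event['x']), str(event['y'])] \n    else:  # 'swipe' \n        cmd = ['adb', 'shell', 'input', 'swipe', str(event['x0']), str(event['y0']), str(event['x1']), str(event['y1'])] \n    subprocess.run(cmd, check=True) \n \n# Accept newline-delimited JSON events from the tracking phone. \nsrv = socket.create_server(('0.0.0.0', 9999)) \nconn, _ = srv.accept() \nfor line in conn.makefile('r'): \n    replay(json.loads(line))"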
+ }, + { + "type": "text", + "bbox": [ + 0.084, + 0.149, + 0.482, + 0.246 + ], + "angle": 0, + "content": "Computer: The computer is at the heart of Empath-D's ability to fuse the real and virtual worlds. It consists of two major components: Phone and Hand Tracker and Mobile Emulator, as well as a Web Dashboard (see Figure 6), which allows the user to select the impairment profile to be applied. In addition, as we shall discuss shortly, this computer may run an Impairment Generator cum Virtual World Renderer. Key functions include:" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.246, + 0.482, + 0.342 + ], + "angle": 0, + "content": "- The Phone and Hand Tracker uses images captured by the VR headset-mounted camera to track the position and pose of the smartphone (relative to the VR device), and create the virtual phone image at the correct position in the virtual world. It also uses the same camera to track the user's hand, as it interacts with the smartphone, and then renders it in the virtual world." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.348, + 0.482, + 0.444 + ], + "angle": 0, + "content": "- The Mobile Emulator executes the app being tested, using the redirected stream of user interaction events transmitted by the smartphone. The resulting visual output of the app is then transmitted as a sequence of images to the VR device, where these images will be integrated into the virtual phone object; likewise, audio output (if any) is directly streamed to the VR device." + }, + { + "type": "list", + "bbox": [ + 0.11, + 0.246, + 0.482, + 0.444 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.445, + 0.483, + 0.693 + ], + "angle": 0, + "content": "The overall Empath-D framework includes an Impairment Generator that is typically applied as one or more filters over the Virtual World Renderer (an engine such as Unity [44]), which is responsible for combining various virtual objects and rendering the virtual world. The Impairment Generator effectively perturbs/modifies the audio/video feeds of the virtual world, before they are displayed on the VR device. For example, to emulate cataracts, it applies an appropriate 'blurring/dimming' filter on the video feed; similarly, to emulate high-frequency hearing loss (an audio impairment), this generator will apply a low-pass filter on the output audio stream. These two components are placed inside a dotted-line rectangle in Figure 2, to reflect the reality that these components run on either the Computer or the VR device, depending on whether the VR device is tethered or not. In untethered VR devices (such as the Samsung Gear VR), the Impairment Generator and the Virtual World Renderer run on the VR device itself. In contrast, with tethered devices such as the HTC Vive, these components run on the computer, which typically offers higher graphics quality, higher frame rates, and faster execution." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.694, + 0.483, + 0.75 + ], + "angle": 0, + "content": "VR Device: Finally, the VR device is used to display the synthesised virtual world to the user. This synthesis involves the fusion of the virtual smartphone, the user's hand and the ambient virtual world, all subject to the impairment filter."
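+ }, + { + "type": "text", + "bbox": [ + 0.082, + 0.752, + 0.483, + 0.79 + ], + "angle": 0, + "content": "To make the Impairment Generator's role described above concrete, the following sketch shows the kind of per-stream filters it applies: a blur-plus-contrast-reduction filter for cataracts and a low-pass filter for high-frequency hearing loss. In Empath-D itself these effects run inside the rendering pipeline (as Unity shader passes); the OpenCV/SciPy calls and parameter values below are illustrative assumptions only." + }, + { + "type": "algorithm", + "bbox": [ + 0.094, + 0.792, + 0.482, + 0.895 + ], + "angle": 0, + "content": "# Sketch of the cataract and hearing-loss filters described above; in \n# Empath-D these run as shader passes, so the OpenCV/SciPy calls and \n# parameter values here are illustrative assumptions only. \nimport cv2 \nimport numpy as np \nfrom scipy.signal import butter, lfilter \n \ndef cataract_filter(frame, blur_sigma=3.0, contrast=0.5): \n    # Reduced visual acuity: Gaussian-blur the whole rendered frame. \n    blurred = cv2.GaussianBlur(frame, (0, 0), blur_sigma) \n    # Reduced contrast sensitivity: compress pixels towards the mean. \n    mean = blurred.mean() \n    return np.clip(mean + contrast * (blurred - mean), 0, 255).astype(np.uint8) \n \ndef hearing_loss_filter(audio, fs=44100, cutoff=2000): \n    # High-frequency hearing loss: low-pass the app's output audio. \n    b, a = butter(4, cutoff, btype='low', fs=fs) \n    return lfilter(b, a, audio)"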
+ }, + { + "type": "title", + "bbox": [ + 0.084, + 0.763, + 0.416, + 0.793 + ], + "angle": 0, + "content": "4 VR-BASED EMULATION OF MOBILE INTERACTION" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.798, + 0.483, + 0.895 + ], + "angle": 0, + "content": "Empath-D follows a split-interaction paradigm: for realistic immersion, Empath-D renders the visual and audio output of the target app in the virtual world (i.e., via VR headset's display and speakers), while allowing the user to interact naturalistically with a real-world physical phone. A major challenge in this paradigm is to enable natural, low-latency tracking and display of the real-world motion of both the phone and the user's hands, so as to ensure consistency" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.107, + 0.915, + 0.246 + ], + "angle": 0, + "content": "across the user's visual, tactile and proprioceptive experience. We achieve this by performing three distinct steps: (a) smartphone tracking, (b) hand tracking, and (c) hand rendering in VR, using an RGB-Depth (RGB-D) camera mounted on the VR headset. Empath-D first tracks the position and orientation of the physical smartphone and synchronises the position of the virtual phone to the physical smartphone (See Section 4.1). Separately, Empath-D also captures fingers in the real world and displays them at the correct position (relative to the virtual smartphone) in the virtual world (See Section 4.2 and 4.3)." + }, + { + "type": "image", + "bbox": [ + 0.586, + 0.265, + 0.844, + 0.467 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.525, + 0.48, + 0.904, + 0.495 + ], + "angle": 0, + "content": "Figure 3: Tracking physical phone with fiducial markers" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.517, + 0.915, + 0.6 + ], + "angle": 0, + "content": "Empath-D uses the headset-mounted RGB-D camera to capture the colour image along with the depth values, relative to the camera. The camera's position is always fixed, relative to the user's head. Its three axes are thus aligned to a user's head: \\(z\\)-axis to the user's forward (gaze) direction, and \\(x\\) and \\(y\\) axes capturing the horizontal and vertical displacement." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.614, + 0.839, + 0.631 + ], + "angle": 0, + "content": "4.1 Tracking the physical smartphone" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.632, + 0.915, + 0.715 + ], + "angle": 0, + "content": "Empath-D uses fiducial markers, displayed on the physical smartphone's screen, to localise the smartphone efficiently. It takes a colour image as an input, and returns the transformation relative to the RGB-D camera's coordinate system: translation and rotation (i.e., x, y, z, roll, pitch, yaw). We employ a technique proposed and detailed in [14]." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.715, + 0.915, + 0.895 + ], + "angle": 0, + "content": "The Empath-D Phone and Hand Tracker component tracks the physical phone using markers captured by the camera. Each marker, displayed on the phone screen, has a distinct pattern. The tracker knows the position of each marker (e.g., top-left, top-right, bottom-left and bottom-right) in the physical smartphone screen's coordinate system. The system first detects these markers in a given colour image, identifying them based on their unique patterns (see Figure 3). In particular, the system recognises the coordinates of each of the four corners of each marker.
Moreover, the system knows the true size of, and separation between, each marker. It then uses an object pose estimation algorithm (provided by OpenCV's solvePnP function [6]), along with the array of fiducial marker points, to compute the 3-D position and orientation of the smartphone. Past" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.073, + 0.462, + 0.088 + ], + "angle": 0, + "content": "Empath-D: VR-based Empathetic App Design for Accessibility" + }, + { + "type": "header", + "bbox": [ + 0.612, + 0.074, + 0.914, + 0.088 + ], + "angle": 0, + "content": "MobiSys '18, June 10-15, 2018, Munich, Germany" + }, + { + "type": "code_caption", + "bbox": [ + 0.085, + 0.106, + 0.292, + 0.121 + ], + "angle": 0, + "content": "Algorithm 1 Hand Segmentation" + }, + { + "type": "algorithm", + "bbox": [ + 0.094, + 0.125, + 0.482, + 0.39 + ], + "angle": 0, + "content": "1: Input: \\(T\\gets\\) Phone's translation (3-D vector) \n2: Input: \\(R\\gets\\) Phone's orientation \\((3\\times 3\\) rotation matrix) \n3: Input: \\(F\\gets\\) RGB-D frame, a 2-D array in which each entry \\(F_{i,j}\\) holds a colour value and 3-D position relative to the camera. \n4: Input: \\(V\\gets\\) 3-D region of interest (relative to the phone) \n5: Output: fgMask, a 2-D bool array whose dimensions equal those of \\(F\\) \n6: \n7: fgMask[i,j] \\(\\leftarrow\\) false for all \\((i,j)\\) \n8: for point \\((i,j)\\) in \\(F\\) do \n9: if \\((i,j)\\) in screen_border then \n10: /\\* Case A: Blue background segmentation \\*/ \n11: fgMask[i,j] \\(\\leftarrow\\) \\((1 - Blue(F_{i,j}) + 0.5\\cdot Red(F_{i,j}) > \\tau)\\) \n12: else \n13: /\\* Case B: Depth-based segmentation \\*/ \n14: pos_phone \\(\\leftarrow\\) \\(R^{-1}\\cdot (Position(F_{i,j}) - T)\\) \n15: fgMask[i,j] \\(\\leftarrow\\) (pos_phone \\(\\in V\\)) \n16: end if \n17: end for \n18: return fgMask" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.43, + 0.483, + 0.458 + ], + "angle": 0, + "content": "results [14] show that this technique can compute an object's position and orientation with sub-cm level accuracy." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.458, + 0.483, + 0.584 + ], + "angle": 0, + "content": "This fiducial marker-based algorithm would fail under two conditions: (a) when the markers are occluded by the user's hand, and (b) if the ambient illumination levels are too low or too high, reducing the contrast level of the markers. To tackle (a), the smartphone screen uses an entire array of markers displayed across the screen, thereby ensuring correct smartphone tracking as long as some part of the phone is visible. Contrast concerns are not particularly relevant in our scenario, as we assume that the user is testing the app in a regularly lit work/office environment." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.599, + 0.29, + 0.615 + ], + "angle": 0, + "content": "4.2 Hand Segmentation" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.617, + 0.483, + 0.756 + ], + "angle": 0, + "content": "Empath-D uses the frames captured by the RGB-D camera to track and segment the user's hand. For each frame, we extract the segment (polygon of pixels) that represents the user's hand, and render that segment in the virtual world. As the goal of hand-tracking is to provide the user with a natural view of her smartphone interactions, we restrict the tracking technique to a 3-D region of interest (ROI) that is centred at the phone, with a depth of \\(2cm\\) and a planar boundary of \\(6cm\\).
In other words, we only track the hand while it is \\(\\leq 2cm\\) away from the smartphone screen, and within \\(\\leq 6cm\\) of the smartphone edges." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.756, + 0.483, + 0.894 + ], + "angle": 0, + "content": "A straightforward approach is to apply a depth-based segmentation strategy, where we first isolate only the foreground points which lie within a depth \\(= 2cm\\) of the smartphone surface. However, we empirically observed that, due to the glossy surface of the smartphone, such depth estimation was inaccurate for points located on the smartphone's screen. Accordingly, we implemented two separate segmentation methods (detailed in Algorithm 1): (case A) a colour-based segmentation approach to identify points which are directly over the smartphone, and (case B) a depth-based approach to identify points which are near, but not over, the smartphone's" + }, + { + "type": "image", + "bbox": [ + 0.544, + 0.104, + 0.707, + 0.27 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.546, + 0.283, + 0.702, + 0.297 + ], + "angle": 0, + "content": "Figure 4: Mesh of hand" + }, + { + "type": "image", + "bbox": [ + 0.719, + 0.104, + 0.892, + 0.27 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.714, + 0.283, + 0.895, + 0.311 + ], + "angle": 0, + "content": "Figure 5: Empath-D hand segmentation" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.338, + 0.915, + 0.379 + ], + "angle": 0, + "content": "screen. We apply the colour-based segmentation to the points inside the screen's border (thick orange contour in Figure 3) and the depth-based approach to the points outside." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.379, + 0.915, + 0.505 + ], + "angle": 0, + "content": "Colour-based segmentation: We adopt the colour-based technique proposed in [41]. The approach tests RGB values to segment the foreground (hand) from a background coloured in blue. In our scenario, we target human skin as the foreground. Human skin has a property common in all races: its R value is about twice the value of G and B (\\(R \\approx 2G \\approx 2B\\)). Given this property of human skin, we obtain a formula that discriminates the foreground from a background whose \\(B\\) value is 1 (line 11 in Algorithm 1). \\(\\tau\\) is a user-tunable threshold which allows the method to adapt to different lighting conditions." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.505, + 0.915, + 0.587 + ], + "angle": 0, + "content": "However, note that, to enable tracking of the phone, the phone's screen cannot be completely blue, but will need to contain the array of fiducial markers. We tackle both problems simultaneously by using blue (\\(R = 0\\), \\(G = 0\\), \\(B = 1\\)) to colour the markers, over a cyan (\\(R = 0\\), \\(G = 1\\), \\(B = 1\\)) background. Here we modified only the \\(G\\) value, which is unused in the colour-based segmentation." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.587, + 0.914, + 0.656 + ], + "angle": 0, + "content": "Points outside the smartphone's screen are segmented using the depth-based approach. After identifying the points corresponding to the user's hand, the system translates these points to 3-D coordinates in the camera's coordinate system, using the associated depth values."
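+ }, + { + "type": "text", + "bbox": [ + 0.513, + 0.658, + 0.914, + 0.666 + ], + "angle": 0, + "content": "For concreteness, the sketch below transcribes Algorithm 1 into NumPy, assuming colour channels normalised to [0, 1] and a phone pose (R, T) obtained from the fiducial markers via OpenCV's solvePnP (Section 4.1). The function and variable names and the default threshold are our own; the ROI bounds mirror the 2 cm depth and 6 cm planar region of interest." + }, + { + "type": "algorithm", + "bbox": [ + 0.52, + 0.668, + 0.914, + 0.76 + ], + "angle": 0, + "content": "# NumPy transcription of Algorithm 1 (a sketch: names and the tau \n# default are our own; colour channels assumed normalised to [0, 1]). \n# The phone pose comes from the marker array (Section 4.1), e.g.: \n#   ok, rvec, T = cv2.solvePnP(marker_pts_3d, marker_pts_2d, K, dist) \n#   R, _ = cv2.Rodrigues(rvec) \nimport numpy as np \n \ndef hand_segmentation(rgb, pos_cam, in_screen, R, T, tau=0.5, depth_roi=0.02, planar_roi=0.06): \n    fg = np.zeros(rgb.shape[:2], dtype=bool) \n    # Case A: colour-based segmentation over the blue/cyan screen area. \n    red, blue = rgb[..., 0], rgb[..., 2] \n    fg[in_screen] = ((1.0 - blue + 0.5 * red) > tau)[in_screen] \n    # Case B: depth-based segmentation outside the screen border; move \n    # camera-frame points into the phone's frame: R^-1 (p - T). \n    pos_phone = (pos_cam - T) @ R \n    near = (np.abs(pos_phone[..., 2]) <= depth_roi) & (np.abs(pos_phone[..., 0]) <= planar_roi) & (np.abs(pos_phone[..., 1]) <= planar_roi) \n    fg[~in_screen] = near[~in_screen] \n    return fg"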
+ }, + { + "type": "title", + "bbox": [ + 0.515, + 0.669, + 0.885, + 0.685 + ], + "angle": 0, + "content": "4.3 Rendering the hand in the virtual world" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.688, + 0.915, + 0.896 + ], + "angle": 0, + "content": "After detecting the hand segment, the Empath-D system renders it in the virtual world. The system passes the tracked hands to the Virtual World Renderer, sharing the (i) 3D structure of the hands (surface mesh), (ii) colour image of the RGB-D frame (texture), and (iii) mapping between the surface mesh and the colour image (UV map). In common rendering engines (e.g., Unity), the 3D structure of the hand is represented by a triangle mesh-i.e., a set of vertices, constituting individual small triangles. The mesh is rendered at the same location as the user's hand in the real world. As the user's hand is localised in the coordinates of the RGB-D depth camera, the location is offset by an additional depth value (7cm in our implementation), to reflect the additional distance between the centre of the user's eyes and the depth camera. An important characteristic of our algorithm is that we render the actual image of the user's hands over this triangle mesh. Figure 4 illustrates the Delaunay" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.073, + 0.387, + 0.087 + ], + "angle": 0, + "content": "MobiSys '18, June 10-15, 2018, Munich, Germany" + }, + { + "type": "header", + "bbox": [ + 0.793, + 0.074, + 0.913, + 0.087 + ], + "angle": 0, + "content": "Wonjung Kim et al." + }, + { + "type": "text", + "bbox": [ + 0.083, + 0.111, + 0.328, + 0.277 + ], + "angle": 0, + "content": "[Dashboard screenshot: Empath-D Dashboard; cataract (blur and contrast reduction): enabled; Blur intensity: 0.1; Contrast reduction intensity: 1]" + }, + { + "type": "image_caption", + "bbox": [ + 0.083, + 0.3, + 0.482, + 0.327 + ], + "angle": 0, + "content": "Figure 6: Screenshot of Empath-D impairment configuration dashboard" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.362, + 0.483, + 0.431 + ], + "angle": 0, + "content": "triangulation of a set of points. The mesh is combined with the hand's image (Figure 5), and rendered in the VR display. Extracting and rendering the actual image of the user's finger enhances the immersive feeling of real-life smartphone navigation in the virtual world." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.432, + 0.483, + 0.641 + ], + "angle": 0, + "content": "The complexity of the mesh-i.e., the number of vertices (or triangles) in the rendered hand-is an important parameter in the rendering process. A larger number of vertices captures the contours of the hand more precisely, resulting in a more life-like image. However, this also results in added rendering latency in the rendering engine.
To support the twin objectives of low-latency and life-like rendering, we utilise a sub-sampling technique to construct the mesh. Specifically, Empath-D retains all the points on the edges of the segment, to preserve the precise contours of the hand. However, it performs a 32-fold downsampling of the interior points (prior to constructing the Delaunay triangulation), along both the row and column axes, to reduce the computational time significantly, without materially affecting the reconstructed hand image. We shall show, in Section 6, how our prototype Empath-D implementation uses this technique to achieve our twin objectives." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.655, + 0.349, + 0.669 + ], + "angle": 0, + "content": "5 IMPAIRMENT SIMULATION" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.673, + 0.483, + 0.729 + ], + "angle": 0, + "content": "Empath-D aims to enable evaluation of the usability of app designs under visual, auditory and haptic impairment simulation. Realistic simulation of various impairments in the VR world is essential to achieving this goal." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.729, + 0.483, + 0.895 + ], + "angle": 0, + "content": "There has been a thread of research on simulating impairments through physical simulator devices [1, 13, 29, 39, 49]. For instance, Zimmerman et al. use goggles and enclosing materials to simulate low vision impairments [49]. These hardware simulators generalise the impairment of interest and enable simulation of specific aspects of the impairment pathology, rather than emulating exactly how an impairment manifests. However, impairments can vary greatly between individuals. For instance, glaucoma generally progresses in deterioration from the periphery towards the centre of vision, but in reality it comes in different shapes and severities, affecting the usability of applications in different ways. Existing physical impairment simulators simply approximate this as a central circle of" + }, + { + "type": "image", + "bbox": [ + 0.523, + 0.104, + 0.709, + 0.243 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.72, + 0.104, + 0.905, + 0.243 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.514, + 0.256, + 0.913, + 0.284 + ], + "angle": 0, + "content": "Figure 7: Simulated cataract (left) and simulated glaucoma (right)" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.33, + 0.915, + 0.414 + ], + "angle": 0, + "content": "clarity, with blur through to the periphery. Empath-D is advantageous over existing physical simulators in the following ways: it allows 1) impairments to be customised, 2) the simultaneous manifestation of multiple impairments, and 3) new impairments to be added easily. Figure 6 shows the web interface for designers to customise impairments for the target user group." + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.432, + 0.815, + 0.448 + ], + "angle": 0, + "content": "5.1 Simulating Visual Impairments" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.45, + 0.915, + 0.63 + ], + "angle": 0, + "content": "Vision is the dominant sensory system by which humans perceive the world, and is a key focus for Empath-D. Vision impairment is one of the most common causes of accessibility problems that come with age. Common vision impairments include cataracts, glaucoma, and age-related macular degeneration.
Such vision impairments present as reduced visual acuity, loss of central/peripheral vision, or decreased contrast sensitivity. These symptoms are widely reported to affect interaction with various desktop and mobile applications; for example, humans use peripheral vision to pre-scan text ahead of their point of focus. As peripheral vision narrows, the scanning becomes less effective, which slows reading [23]. In this work, we examine and simulate two commonly found visual impairments - cataracts and glaucoma." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.631, + 0.915, + 0.754 + ], + "angle": 0, + "content": "Our approach is to apply an image effect at the \"eye\" (i.e., a camera pair of view renderers) of the VR scene. From this camera pair, the image effect will apply to all other objects in the scene (e.g., smartphone, fingers, scene), just as impaired users would experience it. We employed various image filters for different impairments, which 1) provide realistic renditions of the impairments, helping designers find usability issues and take corrective actions, and 2) have a computational overhead small enough not to add noticeable delays to the overall emulation." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.755, + 0.915, + 0.894 + ], + "angle": 0, + "content": "The approach is flexible and lightweight. The impairment simulator's intensity is configurable at runtime. The image effects are applied at the last stage of the rendering pipeline. Glaucoma presents functionally as a loss in peripheral vision. To simulate glaucoma, we use a vignette with a clear inner circle, blurred inner-outer circle, and black extending outwards from the outer circle (see Figure 7). Cataracts present functionally as reduced visual acuity and reduced contrast sensitivity. We use a blur filter to simulate reduced visual acuity, and a contrast reduction filter to simulate reduced contrast sensitivity (see Figure 7)." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.073, + 0.462, + 0.088 + ], + "angle": 0, + "content": "Empath-D: VR-based Empathetic App Design for Accessibility" + }, + { + "type": "header", + "bbox": [ + 0.612, + 0.073, + 0.914, + 0.088 + ], + "angle": 0, + "content": "MobiSys '18, June 10-15, 2018, Munich, Germany" + }, + { + "type": "table_caption", + "bbox": [ + 0.175, + 0.105, + 0.392, + 0.12 + ], + "angle": 0, + "content": "Table 1: Hardware of Empath-D" + }, + { + "type": "table", + "bbox": [ + 0.104, + 0.133, + 0.46, + 0.235 + ], + "angle": 0, + "content": "
<table><tr><td>VR headset</td><td>Samsung Gear VR [5]</td></tr>
<tr><td>VR smartphone</td><td>Samsung Galaxy S7 [4]</td></tr>
<tr><td>RGB-D camera</td><td>Intel RealSense SR300 [20]</td></tr>
<tr><td>PC</td><td>CPU: 4 cores, 3.4 GHz<br>RAM: 16 GB<br>GPU: GeForce GTX 1080 [32]</td></tr>
<tr><td>Physical IO smartphone</td><td>Samsung Galaxy S5 [40]</td></tr></table>
" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.26, + 0.483, + 0.385 + ], + "angle": 0, + "content": "The functional aspects of vision impairments are straightforward to create in VR, which give Empath-D high extendability to implement other types of visual impairments. While we just described two impairments pertaining to our studies, it is easy to create other impairments such as colour filters to simulate colour blindness. However, we leave the effect of eye movements on impairments as the future work. Since eye-tracking is currently not supported in Empath-D, a user will need to move his head to achieve the same effect." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.397, + 0.358, + 0.413 + ], + "angle": 0, + "content": "5.2 Simulating Other Modalities" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.415, + 0.476, + 0.429 + ], + "angle": 0, + "content": "We discuss how other modalities may be simulated in Empath-D." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.43, + 0.483, + 0.566 + ], + "angle": 0, + "content": "Hand Tremors. Hand tremors are a common symptom of Parkinson's disease or Essential tremor and make it hard for one to precisely point on a touchscreen. A hand tremor may be characterised by the frequency and amplitude of oscillatory movement. Since we present virtual representations of the user's hand (i.e., as a 3D mesh) to enable his interaction with the virtual mobile phone, Empath-D similarly perturbs this 3D mesh in VR to create hand tremors. While a user may physically not experience hand movement, the visual perturbation would be sufficient to hinder accurate touch to simulate hand tremors." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.568, + 0.488, + 0.692 + ], + "angle": 0, + "content": "Hearing Loss. High-frequency hearing loss is a common symptom for the elderly population. People diagnosed with high-frequency hearing loss are unable to hear sounds between \\(2,000\\mathrm{Hz}\\) and 8,000 Hz. These people often struggle to understand or keep up with daily conversations (missing consonants in higher registers, such as the letters F and S or female voices). Empath-D applies a bandpass filter over the output sound of the target application to diminish the sound signals between \\(2\\mathrm{kHz}\\) and \\(8\\mathrm{kHz}\\) and plays the filtered audio feed through the VR device." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.705, + 0.279, + 0.719 + ], + "angle": 0, + "content": "6 IMPLEMENTATION" + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.725, + 0.21, + 0.739 + ], + "angle": 0, + "content": "6.1 Hardware" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.743, + 0.483, + 0.896 + ], + "angle": 0, + "content": "We implemented our current Empath-D prototype using the hardware described in Table 1. We used the Samsung Gear VR fitted with the Samsung Galaxy S7 as the VR headset. We used the Intel RealSense SR300 RGB-D camera for finger tracking, selecting this among alternatives as: 1) its small size and low weight allowed us to easily attach it to the VR headset, and 2) its minimum sensing range is low enough to permit hand tracking at a distance of \\(30\\mathrm{cm}\\). We employed the Samsung Galaxy S5 as the physical I/O device, and a powerful laptop (4 core 3.4 GHz CPU, 16GB RAM) as the intermediary device. The choice of the VR headset itself was deliberate. 
We chose a Samsung Gear VR headset (an untethered" + }, + { + "type": "image", + "bbox": [ + 0.613, + 0.107, + 0.819, + 0.25 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.514, + 0.267, + 0.916, + 0.309 + ], + "angle": 0, + "content": "Figure 8: Rendering frame rate under varying virtual display resolution (width : height = 9 : 16, default resolution of Android emulator is 1080x1920)" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.337, + 0.915, + 0.408 + ], + "angle": 0, + "content": "smartphone-powered VR device) over more powerful PC-tethered VR devices such as the HTC Vive or Oculus Rift. This was mainly because PC-tethered devices such as the HTC Vive use IR lasers to localise the headset, which interfere with the IR laser emitted by the RGB-D camera used for depth sensing in hand tracking." + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.42, + 0.791, + 0.437 + ], + "angle": 0, + "content": "6.2 Rendering an Emulated App" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.439, + 0.916, + 0.604 + ], + "angle": 0, + "content": "We used empirical studies to determine an appropriate screen resolution and frame rate to render the emulated app (and the smartphone) in the VR headset. Empath-D obtains screenshots of its mobile emulator using the Android virtual display [35] and transmits these screenshots over WiFi to the Gear VR device. The overhead of transmitting and rendering these emulated screenshots is proportional to their resolution. The default 1080p resolution could sustain a frame rate of only 18 fps, which causes visible jerkiness. To reduce this overhead, we reduced the resolution (using the setDisplayProjection() method), and applied differential transmissions, sending a screenshot only when the emulated app's display changes." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.605, + 0.916, + 0.756 + ], + "angle": 0, + "content": "Figure 8 shows the experimental results on the tradeoff between the resolution and the rendering frame rate, obtained while playing a video to ensure continuous change of the screen content. The frame rate saturates at \\(57~\\text{fps}\\), at a screen resolution of \\(485\\times 863\\). Moreover, through another user study (described next) on the minimum resolution needed to read an app's contents, we empirically verified that the participants had no issues in reading the app's content at the resolution of \\(485\\times 863\\). Hence, we chose this resolution as our default, although this setting can be modified (e.g., we can pick a higher resolution, and a lower frame rate, for an app with mostly static content)." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.757, + 0.916, + 0.896 + ], + "angle": 0, + "content": "If Empath-D displays the virtual smartphone at its original size in the virtual world (portrait position), its display becomes illegible. For example, the Samsung Galaxy S7 (in the Gear VR) has a resolution of \\(2560 \\times 1440\\) and an \\(\\approx 101^{\\circ}\\) horizontal field of view, yielding a horizontal pixel density of \\(\\approx 25.3\\) pixels/degree. When a virtual phone is held \\(30\\mathrm{cm}\\) away, the horizontal pixel density drops below 25.3 pixels/degree due to downsampling of the virtual phone screen as seen through the VR display.
This presents a problem for viewing the content of the virtual phone - in particular, text - as its pixel density is significantly lower than when viewing a physical" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.084, + 0.073, + 0.387, + 0.088 + ], + "angle": 0, + "content": "MobiSys '18, June 10-15, 2018, Munich, Germany" + }, + { + "type": "header", + "bbox": [ + 0.792, + 0.073, + 0.914, + 0.087 + ], + "angle": 0, + "content": "Wonjung Kim et al." + }, + { + "type": "image", + "bbox": [ + 0.102, + 0.107, + 0.473, + 0.239 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.082, + 0.255, + 0.483, + 0.284 + ], + "angle": 0, + "content": "Figure 9: Readable font size of the virtual smartphone at varying magnification ratios" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.307, + 0.482, + 0.334 + ], + "angle": 0, + "content": "phone. For instance, the Galaxy S5 gives \\(\\approx 89.4\\) pixels/degree at \\(30\\mathrm{cm}\\) distance." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.335, + 0.483, + 0.502 + ], + "angle": 0, + "content": "We tackle this issue by scaling up the virtual phone's size by a factor that ensures that the phone's display text remains legible. To determine this factor, we recruited three participants and asked them to record the minimum readable font sizes, while showing them a virtual smartphone (at a distance of \\(30~\\mathrm{cm}\\)) with various magnification ratios (increased by 0.1 from 1.0 to 2.7). Figure 9 shows that participants could read text with the font size = 12sp (the commonly used minimum font size for mobile apps) for magnification factors \\(\\geq 1.5\\). Accordingly, we used 1.5 as the default magnification ratio for the smartphone and its display. We also proportionately scaled up the user's rendered hand. User studies (Section 7) show that users found this configuration highly usable." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.513, + 0.327, + 0.529 + ], + "angle": 0, + "content": "6.3 Rendering Virtual Hand" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.532, + 0.483, + 0.797 + ], + "angle": 0, + "content": "As discussed in Section 4.3, the rendering latency of the virtual hand is proportional to the number of vertices in the Delaunay triangulation-based mesh. To reduce the latency, we apply a nonuniform sampling approach. Specifically, Empath-D retains all the points on the edges of the segment, to preserve the precise contours of the hand. However, it performs a downsampling of the interior points (prior to constructing the Delaunay triangulation), along both the \\(x\\) and \\(y\\) axes, to reduce the computational time significantly, without materially affecting the reconstructed hand image. We empirically determined the sampling rate \\(X\\) by varying \\(X\\) and measuring both (i) the processing latency and (ii) the SSIM [12, 46] (Structural SIMilarity; a metric of perceived image quality) of the hand images, using 200 RGB-D frames. Figure 10 shows the results. Without any subsampling (\\(X = 0\\%\\)), the rendering latency is 311.1 msec, which is too high for our responsiveness goal. We empirically downsample the internal pixels by a factor of 32 (\\(X = 99.9\\%\\)), i.e., choosing every \\(32^{nd}\\) pixel on the grid. This results in a latency of 26.9 msec, while keeping the SSIM = 0.976, a level indistinguishable from the original as perceived by a human."
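+ }, + { + "type": "text", + "bbox": [ + 0.082, + 0.798, + 0.483, + 0.806 + ], + "angle": 0, + "content": "The contour-preserving subsampling just described can be sketched in a few lines of Python, assuming a boolean hand mask from Algorithm 1 and SciPy's Delaunay triangulation; the neighbourhood test and helper names are our own, and the perceived-quality check could use an SSIM implementation such as skimage.metrics.structural_similarity." + }, + { + "type": "algorithm", + "bbox": [ + 0.094, + 0.808, + 0.482, + 0.895 + ], + "angle": 0, + "content": "# Minimal sketch of the contour-preserving mesh subsampling above, \n# assuming fg_mask comes from Algorithm 1; helper logic is our own. \nimport numpy as np \nfrom scipy.spatial import Delaunay \n \ndef build_hand_mesh(fg_mask, step=32): \n    # Interior pixels have all four direct neighbours in the mask; \n    # every remaining (boundary) pixel is kept to preserve contours. \n    core = np.zeros_like(fg_mask) \n    core[1:-1, 1:-1] = (fg_mask[1:-1, 1:-1] & fg_mask[:-2, 1:-1] & fg_mask[2:, 1:-1] & fg_mask[1:-1, :-2] & fg_mask[1:-1, 2:]) \n    boundary = fg_mask & ~core \n    # 32-fold downsampling of interior points along both axes. \n    grid = np.zeros_like(fg_mask) \n    grid[::step, ::step] = True \n    pts = np.argwhere(boundary | (core & grid))  # (row, col) vertices \n    tri = Delaunay(pts[:, ::-1])  # triangulate in (x, y) order \n    return pts, tri.simplices  # vertices and triangle index list"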
+ }, + { + "type": "title", + "bbox": [ + 0.084, + 0.808, + 0.327, + 0.822 + ], + "angle": 0, + "content": "6.4 Environment Emulation" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.826, + 0.483, + 0.897 + ], + "angle": 0, + "content": "To enable holistic evaluation of app interactions, Empath-D emulates not just the virtual phone, but the entire virtual world as well. In our current implementation, we emulated a crowded Urban Street environment, which includes crosswalks, traffic lights, pedestrians and commonplace roadside obstacles. To further mimic real-world" + }, + { + "type": "image", + "bbox": [ + 0.548, + 0.107, + 0.887, + 0.243 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.514, + 0.258, + 0.915, + 0.285 + ], + "angle": 0, + "content": "Figure 10: Rendering latency vs. image quality of the virtual hand" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.31, + 0.915, + 0.379 + ], + "angle": 0, + "content": "movement, our implementation allows the user to navigate the virtual world by (i) rotating her head (this uses the head tracking ability of the VR device), and (ii) 'walking in place', using the technique proposed in [45], as this does not require any additional hardware on the VR device." + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.391, + 0.662, + 0.407 + ], + "angle": 0, + "content": "6.5 VR Manager" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.409, + 0.915, + 0.521 + ], + "angle": 0, + "content": "This component currently executes on the VR smartphone, and is responsible for combining the output of the various components (Hand Tracker, Phone Tracker and Virtual Phone) in the virtual world. Implemented as a Unity application, it renders these various components, and is also responsible for applying the impairments to the output of the virtual world. The image effects simulating low vision impairments are implemented as Shader scripts in Unity." + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.532, + 0.662, + 0.546 + ], + "angle": 0, + "content": "7 EVALUATION" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.55, + 0.915, + 0.676 + ], + "angle": 0, + "content": "We now present a mix of system and user experiments to evaluate the performance and efficacy of our Empath-D implementation. Besides micro-benchmark studies, we conducted two experiments to capture user interaction with Empath-D. In Experiment 1, we examine the performance of Empath-D vs. a real-world smartphone, in the absence of any impairments. In Experiment 2, we consider an impairment-augmented version of Empath-D, comparing the performance of users against the use of commercial impairment simulation hardware." + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.687, + 0.84, + 0.72 + ], + "angle": 0, + "content": "7.1 Micro-benchmark Performance of Empath-D" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.722, + 0.915, + 0.765 + ], + "angle": 0, + "content": "We measured the overall latency of Empath-D, both in terms of the delay in reflecting touch interactions in the virtual world and in terms of the hand tracking delay." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.772, + 0.915, + 0.897 + ], + "angle": 0, + "content": "7.1.1 End-to-end Latency of Touch Interaction. As a measure of the overall responsiveness of Empath-D, we computed the latency between a touch input, on the physical smartphone, and the resulting change in the content of the virtual smartphone, rendered in the VR display.
To measure this, we utilised a high framerate camera (operating at 240 fps) to concurrently record both the screen of the physical smartphone and the virtual phone (displayed in the VR). The phone screen was coloured green initially, and was programmed to turn red as soon as it received a touch input. We repeated the" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.073, + 0.462, + 0.088 + ], + "angle": 0, + "content": "Empath-D: VR-based Empathetic App Design for Accessibility" + }, + { + "type": "header", + "bbox": [ + 0.612, + 0.073, + 0.914, + 0.088 + ], + "angle": 0, + "content": "MobiSys '18, June 10-15, 2018, Munich, Germany" + }, + { + "type": "image", + "bbox": [ + 0.116, + 0.107, + 0.45, + 0.226 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.126, + 0.241, + 0.441, + 0.256 + ], + "angle": 0, + "content": "Figure 11: Overhead of impairment simulation" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.286, + 0.482, + 0.34 + ], + "angle": 0, + "content": "measurement 23 times, capturing (via the video frames) the time gap between (i) the physical smartphone screen turning red and (ii) the virtual smartphone turning red in the VR display. The end-to-end latency is 237.70 msec (\\(SD = 20.43\\))." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.342, + 0.483, + 0.577 + ], + "angle": 0, + "content": "By monitoring the intermediary computer, we obtained the breakdown of this delay: (i) smartphone responsiveness (the time from the user touching the screen till the time the phone transmits the touch event to the computer) \\(= 0.3\\) msec \\((SD = 0.16)\\); (ii) computer emulation responsiveness (the time from receiving the touch event till the time the screenshot of the modified display is sent to the VR device) \\(= 141.37\\) msec \\((SD = 6.6)\\), and (iii) the VR responsiveness (the time from receiving the screenshot till it is rendered on the VR display) \\(= 10.46\\) msec \\((SD = 8.36)\\). The remaining latency (\\(\\approx 87\\) msec) can be attributed to the WiFi network latency. These micro-measurements suggest that the default Android emulator used in our studies was the dominant component of the latency. The default Android emulator is known to be fairly slow, and multiple third party emulators (e.g., Genymotion [16]) are reported to provide significantly lower latency. Accordingly, we anticipate that this overall latency can be reduced to \\(\\leq 150\\) msec, without any significant architectural modification of Empath-D." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.586, + 0.483, + 0.835 + ], + "angle": 0, + "content": "7.1.2 End-to-end Latency of Virtual Hand. We also evaluated the latency between the physical movement of the user's hand and the rendering of this movement in the VR display. To capture this time difference, we displayed a small circle, at a specific point on the display, on both the smartphone and the virtual phone. Users were instructed to swipe a finger on the screen to reach the circle. We measured, over 20 experiments, the time (number of frames recorded by the previously used high-framerate camera) between the occlusion of the circle on the physical phone and the resulting occlusion in the virtual phone, computing an average latency of \\(117.46\\mathrm{msec}\\) (\\(SD = 20.44\\)).
Additionally, we measured the component delays of this rendering process as: (i) reading an RGB-D frame: \\(4.90\\mathrm{msec}\\) (\\(SD = 0.58\\)); (ii) phone tracking: \\(4.56\\mathrm{msec}\\) (\\(SD = 0.25\\)); (iii) hand tracking: \\(8.0\\mathrm{msec}\\) (\\(SD = 1.58\\)), and (iv) the VR responsiveness (the time from receiving the hand mesh till it is rendered on the VR display): \\(26.99\\mathrm{msec}\\) (\\(SD = 5.22\\)). The remaining latency, attributable to the WiFi network, is \\(\\approx 73\\mathrm{msec}\\), consistent with the measurements reported above." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.849, + 0.45, + 0.865 + ], + "angle": 0, + "content": "7.2 Study Design for Usability Experiments" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.867, + 0.483, + 0.896 + ], + "angle": 0, + "content": "We then conducted user studies on the usability and real-world fidelity of our Empath-D implementation. The user study (approved" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.073, + 0.462, + 0.088 + ], + "angle": 0, + "content": "Empath-D: VR-based Empathetic App Design for Accessibility" + }, + { + "type": "header", + "bbox": [ + 0.612, + 0.073, + 0.914, + 0.088 + ], + "angle": 0, + "content": "MobiSys '18, June 10-15, 2018, Munich, Germany" + }, + { + "type": "table_caption", + "bbox": [ + 0.536, + 0.105, + 0.892, + 0.119 + ], + "angle": 0, + "content": "Table 2: Study Tasks and Conditions in Experiment 1" + }, + { + "type": "table", + "bbox": [ + 0.537, + 0.133, + 0.89, + 0.248 + ], + "angle": 0, + "content": "
<table><tr><td>Task</td><td>Condition</td><td>Impairment</td><td>Simulator Type</td><td>Environment</td></tr>
<tr><td rowspan="6">T1-T4</td><td>A</td><td>none</td><td>none</td><td>Real</td></tr>
<tr><td>B</td><td>Cataracts</td><td>Physical</td><td>Real</td></tr>
<tr><td>C</td><td>none</td><td>none</td><td>Virtual</td></tr>
<tr><td>D</td><td>Cataracts</td><td>Virtual</td><td>Virtual</td></tr>
<tr><td>E</td><td>Glaucoma</td><td>Physical</td><td>Real</td></tr>
<tr><td>F</td><td>Glaucoma</td><td>Virtual</td><td>Virtual</td></tr></table>
" + }, + { + "type": "table_caption", + "bbox": [ + 0.529, + 0.262, + 0.9, + 0.276 + ], + "angle": 0, + "content": "Table 3: Smartphone Interaction Tasks in Experiment 1" + }, + { + "type": "table", + "bbox": [ + 0.543, + 0.289, + 0.884, + 0.392 + ], + "angle": 0, + "content": "
<table><tr><td>Task Type</td><td>Task Code</td><td>Task Description</td></tr>
<tr><td rowspan="3">Everyday Phone Use</td><td>T1</td><td>Perform a Calculation</td></tr>
<tr><td>T2</td><td>Add an Alarm</td></tr>
<tr><td>T3</td><td>Search, Save Image on Browser</td></tr>
<tr><td>Controlled Pointing</td><td>T4</td><td>Number Search and Point</td></tr></table>
" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.412, + 0.915, + 0.453 + ], + "angle": 0, + "content": "by our institution's IRB) consisted of 12 users (9 males) with no pre-existing uncorrected vision impairments. Users were aged 24-39, with a mean age of 30.3 years \\((\\mathrm{SD} = 5)\\)." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.453, + 0.915, + 0.578 + ], + "angle": 0, + "content": "Study Tasks and Measures. We adopted a repeated measures design, with participants counterbalanced for condition order (see Table 2 for the conditions). Participants were asked to perform four different tasks split into two task types; everyday phone use, and controlled pointing (see Table 3). Users were asked to perform all tasks using two-handed interaction, holding the phone at a distance that they normally would during daily use. We chose two-handed interaction to eliminate for phone balancing that is typical in one-handed interaction given the typical size of today's smartphones." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.578, + 0.915, + 0.813 + ], + "angle": 0, + "content": "T1-T3 are everyday tasks users perform on a smartphone. They cover smartphone touch interaction of taps, swipes, and long press, on UI widgets such as keyboards, buttons and scrolling content. Users were asked to experience performing these tasks under six conditions, including under impairments (both using the physical hardware and the VR device). At the end of all three tasks (T1-T3), users completed the NASA-TLX[18] survey to indicate their perceived workload during task performance. T4, on the other hand, is a controlled pointing task experiment. Participants were given a stimulus number and then asked to click on the button with the corresponding number, as quickly and as precisely as they could. (See Figure 12 for a screenshot of the application used in this task.) Users repeated this task 80 times in succession, for each of the six conditions (A-F; see Table 2). We recorded the touch times and positions with the task app. We conducted a short semi-structured interview at the end of the study to understand users' experiences with, and perceptions of, the physical and virtual impairment simulations." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.813, + 0.915, + 0.897 + ], + "angle": 0, + "content": "Instruments: We compared Empath-D with a commercial physical impairment simulator [13]. To calibrate for visual acuity, we adapted a test similar to a Snellen eye test chart [42] - showing rows of letters with each lower row having a smaller font size. We first used the physical impairment simulator to obtain the minimum acceptable font size. Using the same test page in the VR, we applied" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.084, + 0.073, + 0.387, + 0.088 + ], + "angle": 0, + "content": "MobiSys '18, June 10-15, 2018, Munich, Germany" + }, + { + "type": "header", + "bbox": [ + 0.792, + 0.073, + 0.914, + 0.087 + ], + "angle": 0, + "content": "Wonjung Kim et al." + }, + { + "type": "text", + "bbox": [ + 0.268, + 0.12, + 0.298, + 0.141 + ], + "angle": 0, + "content": "11" + }, + { + "type": "table", + "bbox": [ + 0.207, + 0.177, + 0.361, + 0.32 + ], + "angle": 0, + "content": "
[Figure 12 screenshot content: a 6×3 grid of numbered target buttons, with the stimulus number shown above the grid]
" + }, + { + "type": "image_caption", + "bbox": [ + 0.082, + 0.335, + 0.483, + 0.362 + ], + "angle": 0, + "content": "Figure 12: Screenshot of a test application for the pointing task" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.402, + 0.483, + 0.554 + ], + "angle": 0, + "content": "the impairment and gradually adjusted the severity until we hit the minimum acceptable font size. To calibrate the inner circle of clarity for glaucoma, we implemented an app that allows us to adjust the diameter of a coloured circle. We then used the physical impairment simulator for glaucoma, and adjusted the coloured circle to the point in which the circle reaches the fringe for clarity. We then calibrated the virtual glaucoma simulation in a similar manner. Three independent measurements for visual acuity and circle of clarity were taken from the research team and averaged to determine the final calibration parameters of font size \\(= 12\\) sp and diameter \\(= 60\\) mm." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.571, + 0.414, + 0.587 + ], + "angle": 0, + "content": "7.3 Empath-D vs. Physical Smartphone" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.589, + 0.483, + 0.645 + ], + "angle": 0, + "content": "We first investigate whether the VR-based interaction is a sufficiently faithful replica of the real-world interaction that a user would have with a regular smartphone, in the absence of any impairments." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.645, + 0.483, + 0.811 + ], + "angle": 0, + "content": "Touch Accuracy: In all six conditions, users were able to achieve high levels of button touch accuracy (see Table 4), with the accuracy being \\(98.8\\%\\) (\\(SD = 1.67\\)) when the users interacted unimpaired with the VR device. Comparing the accuracies between the physical smartphone and the VR device, we noted that the VR condition had an accuracy of \\(99.12\\%\\) (\\(SD = 1.32\\)) (across all 6 conditions), whereas the use of the physical smartphone provided \\(100\\%\\) accuracy. In terms of the location accuracy, we noted a difference of \\(2.28 \\, \\text{mm}\\) (\\(SD = 2.98\\)) between the use of Empath-D vs. a physical smartphone. This difference is well within the uncertainty associated with finger touch interactions, and thus demonstrates that user performance was equivalent across both Empath-D and a physical smartphone." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.811, + 0.483, + 0.895 + ], + "angle": 0, + "content": "Perceived Workload: NASA-TLX scores indicated that the users did perceive significant differences in their workload using Empath-D, compared to use of the physical smartphone (\\(Z = 2.824\\), \\(p = 0.005 < 0.05\\)). This does suggest that the navigating an app within the VR device does require greater cognitive effort than simply interacting with a regular smartphone. However, it is difficult to" + }, + { + "type": "table_caption", + "bbox": [ + 0.542, + 0.105, + 0.887, + 0.119 + ], + "angle": 0, + "content": "Table 4: Accuracy of Button Touch Across All Users" + }, + { + "type": "table", + "bbox": [ + 0.551, + 0.133, + 0.88, + 0.235 + ], + "angle": 0, + "content": "
ImpairmentEnvironmentAccuracy (SD) %
NonePhysical100
Virtual98.79 (1.67)
CataractsPhysical100
Virtual99.09 (1.36)
GlaucomaPhysical100
Virtual99.49 (0.82)
" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.27, + 0.914, + 0.297 + ], + "angle": 0, + "content": "decipher whether this difference is due to Empath-D-specific issues, or a general lack of familiarity with VR devices." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.298, + 0.915, + 0.436 + ], + "angle": 0, + "content": "We additionally investigated the subjective feedback captured by the semi-structured interview. \\(83\\%\\) (10) of the users reported perceiving increased latency while using Empath-D, while 2 users indicated that they felt no noticeable latency difference. However, all 12 users indicated that the performance of Empath-D was \"acceptable\", and they would be able to use the Empath-D system for testing the usability of apps, as long as the apps do not require extremely low-latency interactions. (3 users indicated that the system might not be usable for testing real-time games.)" + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.452, + 0.857, + 0.482 + ], + "angle": 0, + "content": "7.4 Empath-D vs. Hardware Impairment Simulators" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.486, + 0.915, + 0.556 + ], + "angle": 0, + "content": "We now study the performance of Empath-D vis-a-vis impairments generated using commercially available hardware. Figure 11 shows the overhead of Empath-D under impairment conditions, demonstrating that Empath-D is able to operate without significant performance loss even in the presence of impairments." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.556, + 0.915, + 0.763 + ], + "angle": 0, + "content": "Touch Accuracy: Table 4 enumerates the accuracy for the pointing task (T4) for two distinct impairments (Cataract & Glaucoma), for both the VR-based Empath-D system and the hardware impairment simulator. We see that, in the Cataract condition, Empath-D had a mean accuracy of \\(99.09\\%\\), which is virtually indistinguishable from that of the hardware device (\\(100\\%\\)). A similar pattern was observed for the Glaucoma impairment (\\(99.49\\%\\) for Empath-D vs. \\(100\\%\\) for Hardware). In terms of the location accuracy, we noted a difference of \\(1.7 \\, \\text{mm}\\) (\\(SD = 1.9\\)) (for Cataract) and \\(1.2 \\, \\text{mm}\\) (\\(SD = 1.6\\)) (for Glaucoma) between the use of Empath-D vs. the impairment hardware. Once again, this difference is well within the uncertainty associated with finger touch interactions. These results provide strong evidence that Empath-D is able to emulate impairment conditions that are equivalent to that of dedicated, commercial hardware." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.763, + 0.915, + 0.819 + ], + "angle": 0, + "content": "Perceived Workload: The numerical TLX scores indicated that there was no significant difference for Cataracts; however, the difference for Glaucoma was significant \\((Z = 3.061\\), \\(p = 0.002 < 0.05)\\) with users indicating a higher perceived workload for the VR device." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.835, + 0.692, + 0.848 + ], + "angle": 0, + "content": "7.5 Motion sickness" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.853, + 0.914, + 0.895 + ], + "angle": 0, + "content": "At the end of the user study, we asked each participant if they felt discomfort or unwell. Only two of the twelve participants reported slight motion sickness while using Empath-D. 
Motion sickness may" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.074, + 0.461, + 0.088 + ], + "angle": 0, + "content": "Empath-D: VR-based Empathetic App Design for Accessibility" + }, + { + "type": "header", + "bbox": [ + 0.612, + 0.074, + 0.913, + 0.088 + ], + "angle": 0, + "content": "MobiSys '18, June 10-15, 2018, Munich, Germany" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.108, + 0.482, + 0.136 + ], + "angle": 0, + "content": "arise from: (1) the use of the VR display itself, and (2) the latency from Empath-D. However, it is difficult to separate the two." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.136, + 0.482, + 0.219 + ], + "angle": 0, + "content": "The effects of motion sickness are notably minor in our current prototype of Empath-D. The nature of our experimentation intensifies the use of the VR display, whereas practical use of Empath-D is likely to be more interspersed between app redesigns. We further discuss how we may improve on latency in Section 9.2 to reduce motion sickness that may result from the latency of Empath-D." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.247, + 0.258, + 0.26 + ], + "angle": 0, + "content": "8 RELATED WORK" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.265, + 0.483, + 0.486 + ], + "angle": 0, + "content": "Designing for Inclusiveness. Newell et al. [31] pointed out that traditional user-centred design techniques provide little guidance for designing interfaces for elderly and disabled users due to the large variation amongst the type and degree of impairments. They also highlighted that the standard guidelines for designing disabled-friendly UIs are too general [30] and lacked empathy for users. For instance the WCAG 2.0 lists that the use of colour \"is not used as the only visual means of conveying information, indicating an action, prompting a response or distinguishing a visual element\". This requires interpretation by the designer into specific designs in his application. Over the years, various accessibility design guidelines (such as WCAG 2.0 [3], IBM Accessibility Checklist [38], US Section 508 Standards [2]) and tools (aChecker [15]) have been proposed and refined. However, the problems pointed out by Newell are remained unsolved to a large extent, which hinders elaborate design for a target user group with a specific impairment." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.487, + 0.483, + 0.625 + ], + "angle": 0, + "content": "Simulated Design. There exists prior work on helping UI designers design better interfaces for people suffering from vision impairments. Higuchi et al. [19] proposed a tool to simulate the visual capabilities of the elderly for the design of control panels, while Mankoff et al. [26] developed a tool to simulate a user with visual and motor impairments on the desktop screen. SIMVIZ [9, 47] uses the Oculus Rift VR device to simulate visual impairments to examine reading text on a smartphone. For audio modalities, Werfel et al. [47] simulated hearing ailments by using a pair of microphones with equalised headphones." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.625, + 0.483, + 0.708 + ], + "angle": 0, + "content": "Different from prior works, Empath-D uses VR as the medium for immersive evaluation to 1) flexibly support wider groups of impaired users, and 2) allow naturalistic interactions with a mobile phone in a virtual environment. 
This novel approach supports ecological validity in testing applications and is key for mobile apps which go beyond the static settings of previous work." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.708, + 0.483, + 0.763 + ], + "angle": 0, + "content": "While previous work has focused on simulation in single modality (visual or auditory), Empath-D is able to flexibly combine modalities to support any application type, ailment (visual, auditory, motor) and usage environment." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.763, + 0.483, + 0.888 + ], + "angle": 0, + "content": "System Support for Accessibility. Modern mobile OSes provide accessibility support; in particular, it allows users with far-sightedness to increase fonts and users with blindness to interact through vocal interfaces. Also, Zhong et al. enhanced Android accessibility for users with hand tremor by reducing fine pointing and steady tapping [48]. We believe Empath-D will significantly expand basic accessibility support of commodity devices and accelerates the design and deployment of various accessibility add-ons for different impaired users." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.108, + 0.915, + 0.26 + ], + "angle": 0, + "content": "Testing of Mobile Applications. Recently there have been many systems, such as VanarSena [37], AMC [22], Puma [17], DynoDroid [25], DECAF [24], AppsPlayground [36], for automatically testing and identifying various types of UI and systems bugs in mobile applications. Empath-D takes a different approach in that we do not detect bugs after the application is developed and deployed. Instead, we allow the designer to test early iterations of the designs rapidly. In this way, we hope to reduce the pain of having to make significant UI changes at the end of the design cycle - or worse, end with an application that cannot be used effectively by the target impaired demographic." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.277, + 0.652, + 0.291 + ], + "angle": 0, + "content": "9 DISCUSSION" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.295, + 0.915, + 0.351 + ], + "angle": 0, + "content": "Our current studies indicate the considerable promise of Empath-D, as a mechanism for rapid and empathetic evaluation of app usability. We now discuss some additional studies and issues that we intend to explore further." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.369, + 0.771, + 0.384 + ], + "angle": 0, + "content": "9.1 User study with Designers" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.387, + 0.915, + 0.608 + ], + "angle": 0, + "content": "We conducted a short user study with two mobile app developers to qualitatively examine Empath-D in actual use. Both developers have previously worked to create an Android mobile application, which was used as the baseline for the study. The developers were tasked with redesigning the mobile app for the glaucoma-impaired under two conditions: 1) without Empath-D, but with materials describing glaucoma and showing functionally accurate examples of glaucoma, and 2) with the same materials, and Empath-D. Both developers agreed that Empath-D helped them improve their designs over the baseline condition. 
The developers reported that Empath-D allowed them to improve their designs in two ways: 1) they can focus their attention on re-designing particular problematic parts of the UI, and 2) they are able to appropriately calibrate their modifications (for instance increasing the font size may help, but text that is too large will also cause glaucoma sufferers to visually scan more, causing fatigue)." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.626, + 0.787, + 0.642 + ], + "angle": 0, + "content": "9.2 Dealing with Latency Issues" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.644, + 0.915, + 0.824 + ], + "angle": 0, + "content": "Our experimental studies indicate that users are able to utilise Empath-D effectively for \"conventional\" apps—i.e., those that typically involve sporadic interaction by users with UI elements, such as buttons and keyboards. The current end-to-end latency (of \\(\\approx\\) 200 msec) is not an impediment for high-fidelity evaluation of such apps. However, the participants also indicated that this latency (lag between user actions and rendering in the VR display) would pose a problem for highly latency-sensitive applications, such as games. At present, it is thus appropriate to state that Empath-D potentially needs additional optimisations to support such applications. The most obvious improvement would be to replace the default Android emulator with a faster, custom emulation engine—this is likely to reduce \\(\\approx\\) 100 msec of the delay budget." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.825, + 0.915, + 0.895 + ], + "angle": 0, + "content": "The current implementation streams JPEG images (hand, emulator's screen) from the intermediary computer to the VR smartphone. We plan to adopt a low-latency video streaming codec such as H.265 HEVC [43], which would help reduce networking and rendering latency. OS-level optimisations (e.g., preemptive priority" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.084, + 0.073, + 0.388, + 0.088 + ], + "angle": 0, + "content": "MobiSys '18, June 10-15, 2018, Munich, Germany" + }, + { + "type": "header", + "bbox": [ + 0.793, + 0.073, + 0.914, + 0.087 + ], + "angle": 0, + "content": "Wonjung Kim et al." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.108, + 0.482, + 0.135 + ], + "angle": 0, + "content": "for inter-component messages) may be needed to support even lower latency." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.135, + 0.482, + 0.192 + ], + "angle": 0, + "content": "Recently, several works have proposed techniques for achieving high-quality VR experience on mobile devices [7, 10, 21]. Empath- \\(D\\) could borrow some techniques to improve latency and video quality." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.204, + 0.414, + 0.218 + ], + "angle": 0, + "content": "9.3 User Performance with VR Devices" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.222, + 0.483, + 0.401 + ], + "angle": 0, + "content": "Moreover, our user studies also indicated that the time for performing tasks (T1-T4) was marginally higher when using the VR environment, compared to the direct use of a real-world smartphone. More specifically, for the pointing task T4, there was an average difference of 654 msec in the task completion time using Empath-D, compared to the smartphone. In addition, anecdotal comments suggest that continued use of the VR device, for longer-lived sessions, might pose additional usability challenges. 
For example, a couple of users indicated some minor muscle fatigue, most likely as a result of using a 'heavy' VR device. It is an open question whether these issues will be mitigated over time, as VR devices become lighter and more ergonomic, and as users have greater familiarity with the use of VR devices." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.416, + 0.36, + 0.431 + ], + "angle": 0, + "content": "9.4 Advanced Uses of Empath-D" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.434, + 0.483, + 0.656 + ], + "angle": 0, + "content": "Our current implementation of Empath-D supports the virtualisation of certain output modalities (specifically the display and audio) of the emulated app. The vision of Empath-D can be extended to create other richer interaction modes, often blending virtual and augmented reality (AR) settings. As an example, certain emulation conditions may need to generate and integrate synthetic sensor traces, to replace the real sensor traces from the smartphone-e.g., to mimic the user's movement in locations, such as forests and mountains, the phone's real GPS trace would need to be replaced by a synthetic GPS trace as in [27, 28]. Similarly, in some cases, the app itself might need to take inputs from the VR world-e.g., if the app was being used to magnify certain objects embedded in the VR world. While such use cases can be supported, they will require enhancements to the current Empath-D framework, and it is likely that the implementation may surface additional challenges, in terms of computational overhead and latency." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.669, + 0.48, + 0.685 + ], + "angle": 0, + "content": "9.5 Developing Impairment Filters and Profiles" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.687, + 0.483, + 0.826 + ], + "angle": 0, + "content": "To demonstrate the viability of Empath-D, we focused on demonstrating the ability to simulate visual impairments and in particular cataracts and glaucoma. As we explored, these impairments have functional aspects that are commonly employed to characterise them, such as visual acuity or contrast sensitivity, and are often accompanied by standard tests such as the Snellen eye test chart [42] and Pelli-Robson contrast sensitivity chart [34] respectively. From examining the commercial physical impairment simulator and our experimentation, we believe that Empath-D has the ability to functionally simulate other impairments." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.826, + 0.483, + 0.897 + ], + "angle": 0, + "content": "We recognise two important directions that Empath-D needs address to improve impairment simulation and use. First, impairment filters have to be developed in concert with medical professionals who are subject matter experts in the areas of the specific pathologies. This helps to develop a library of impairment filters. Second," + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.108, + 0.915, + 0.219 + ], + "angle": 0, + "content": "with verified impairment filters, we may create impairment profiles, which characterise groups of users with possibly overlapping requirements. For instance, a hypothetical impairment profile may calibrate for a demographic of a range of ages, sex, and percentage of the population who may have myopia and cataracts—both which affect visual acuity. With impairment profiles, app developers may easily select and understand the demographic to which they are designing for." 
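+ To make the notion of an impairment profile concrete, the following minimal sketch shows one way such a profile could be represented; the `ImpairmentFilter` and `ImpairmentProfile` structures and all prevalence figures are illustrative assumptions, not part of the Empath-D implementation:
+
+ ```python
+ from dataclasses import dataclass, field
+ from typing import Dict, List
+
+ @dataclass
+ class ImpairmentFilter:
+     # One verified filter, developed and calibrated with medical experts.
+     name: str        # e.g. "cataract", "glaucoma", "myopia"
+     severity: float  # 0.0 (none) .. 1.0 (maximal), from clinical calibration
+
+ @dataclass
+ class ImpairmentProfile:
+     # Characterises a demographic as a mix of possibly overlapping filters.
+     demographic: str              # e.g. "adults aged 65-80, mixed sex"
+     prevalence: Dict[str, float]  # fraction of the group with each condition
+     filters: List[ImpairmentFilter] = field(default_factory=list)
+
+ # Hypothetical example: the designer selects a demographic profile rather
+ # than tuning raw filter parameters by hand.
+ elderly_vision = ImpairmentProfile(
+     demographic="adults aged 65-80",
+     prevalence={"myopia": 0.35, "cataract": 0.40},
+     filters=[ImpairmentFilter("myopia", 0.3), ImpairmentFilter("cataract", 0.6)],
+ )
+ ```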
+ }, + { + "type": "title", + "bbox": [ + 0.515, + 0.231, + 0.673, + 0.244 + ], + "angle": 0, + "content": "10 CONCLUSION" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.249, + 0.915, + 0.54 + ], + "angle": 0, + "content": "We presented the design and evaluation of Empath-D, a framework that allows app developers to 'step into the shoes' of impaired users, and perform an empathetic evaluation of their app interfaces. Our key idea is to utilise a virtual world (using a commodity VR device) to present an impaired view of the app's interface, while allowing the user to interact naturally with a real commodity smartphone in the physical world. Overcoming the current computational limitations (of the VR device and the Android emulator) required us to make careful system choices, such as (i) appropriate tradeoffs between the resolution and frame rate for rendering the virtual smartphone, (ii) subsampling of the mesh representing the user's hand and (iii) scaling up the size of the virtual smartphone to overcome the lower resolution of the VR device. User studies show that Empath-D is effective in (a) providing usability that is equivalent to using a real app (on a real smartphone), for applications that do not require ultra-low latency and (b) emulating impairments in a similar fashion to custom hardware devices. We believe that Empath-D can be a powerful new paradigm for effective bidirectional integration between real-world user actions and virtual worlds, and that this can enable additional immersive applications beyond just 'impairment emulation'." + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.553, + 0.753, + 0.567 + ], + "angle": 0, + "content": "11 ACKNOWLEDGEMENT" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.572, + 0.914, + 0.683 + ], + "angle": 0, + "content": "We are thankful to our shepherd Prof. Xia Zhou and all anonymous reviewers for their valuable reviews. This research is supported partially by Singapore Ministry of Education Academic Research Fund Tier 2 under research grant MOE2014-T2-1063, and by the National Research Foundation, Prime Minister's Office, Singapore under its IDM Futures Funding Initiative. All findings and recommendations are those of the authors and do not necessarily reflect the views of the granting agency, or SMU." + }, + { + "type": "title", + "bbox": [ + 0.517, + 0.696, + 0.635, + 0.709 + ], + "angle": 0, + "content": "REFERENCES" + }, + { + "type": "ref_text", + "bbox": [ + 0.522, + 0.713, + 0.914, + 0.734 + ], + "angle": 0, + "content": "[1] [n. d.]. AGNES (Age Gain Now Empathy Systems. ([n. d.]). Retrieved 2018-04-13 from http://agelab.mit.edu/agnes-age-gain-now-empathy-system" + }, + { + "type": "ref_text", + "bbox": [ + 0.522, + 0.734, + 0.915, + 0.753 + ], + "angle": 0, + "content": "[2] [n. d]. US Section 508 Standards. ([n. d]). Retrieved 2018-04-13 from https: //www.section508.gov/" + }, + { + "type": "ref_text", + "bbox": [ + 0.522, + 0.754, + 0.915, + 0.774 + ], + "angle": 0, + "content": "[3] 2008. Web Content Accessibility Guidelines (WCAG) 2.0. (11 December 2008). Retrieved 2018-04-13 from https://www.w3.org/TR/WCAG20/" + }, + { + "type": "ref_text", + "bbox": [ + 0.522, + 0.774, + 0.915, + 0.794 + ], + "angle": 0, + "content": "[4] 2016. Samsung Galaxy S7 Specifications. (2016). Retrieved 2018-04-13 from http://www.samsung.com/global/galaxy/galaxy-s7/#!/spec" + }, + { + "type": "ref_text", + "bbox": [ + 0.522, + 0.794, + 0.915, + 0.814 + ], + "angle": 0, + "content": "[5] 2017. Samsung Gear VR Specifications. (2017). 
Retrieved 2018-04-13 from http://www.samsung.com/global/galaxy/gear-vr/specs/" + }, + { + "type": "ref_text", + "bbox": [ + 0.522, + 0.814, + 0.915, + 0.845 + ], + "angle": 0, + "content": "[6] 2018. SolvePnP, Camera Calibration and 3D Reconstruction, OpenCV. (2018). Retrieved 2018-04-13 from https://docs.opencv.org/2.4/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html" + }, + { + "type": "ref_text", + "bbox": [ + 0.522, + 0.845, + 0.915, + 0.895 + ], + "angle": 0, + "content": "[7] Omid Abari, Dinesh Bharadia, Austin Duffield, and Dina Katabi. 2017. Enabling High-Quality Untethered Virtual Reality. In 14th USENIX Symposium on Networked Systems Design and Implementation (NSDI 17). USENIX Association, Boston, MA, 531-544. https://www.usenix.org/conference/nsdi17/technical-sessions/presentation/abari" + }, + { + "type": "list", + "bbox": [ + 0.522, + 0.713, + 0.915, + 0.895 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.074, + 0.462, + 0.088 + ], + "angle": 0, + "content": "Empath-D: VR-based Empathetic App Design for Accessibility" + }, + { + "type": "header", + "bbox": [ + 0.612, + 0.074, + 0.913, + 0.088 + ], + "angle": 0, + "content": "MobiSys '18, June 10-15, 2018, Munich, Germany" + }, + { + "type": "ref_text", + "bbox": [ + 0.091, + 0.109, + 0.484, + 0.16 + ], + "angle": 0, + "content": "[8] Ardalan Amiri Sani, Kevin Boos, Min Hong Yun, and Lin Zhong. 2014. Rio: A System Solution for Sharing I/O Between Mobile Systems. In Proceedings of the 12th Annual International Conference on Mobile Systems, Applications, and Services (MobiSys '14). ACM, New York, NY, USA, 259-272. https://doi.org/10.1145/2594368.2594370" + }, + { + "type": "ref_text", + "bbox": [ + 0.091, + 0.16, + 0.484, + 0.211 + ], + "angle": 0, + "content": "[9] Halim Cagri Ates, Alexander Fiannaca, and Eelke Folmer. 2015. Immersive Simulation of Visual Impairments Using a Wearable See-through Display. In Proceedings of the Ninth International Conference on Tangible, Embedded, and Embodied Interaction (TEI '15). ACM, New York, NY, USA, 225-228. https://doi.org/10.1145/2677199.2680551" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.211, + 0.482, + 0.261 + ], + "angle": 0, + "content": "[10] Kevin Boos, David Chu, and Eduardo Cuervo. 2016. FlashBack: Immersive Virtual Reality on Mobile Devices via Rendering Memozoation. In Proceedings of the 14th Annual International Conference on Mobile Systems, Applications, and Services (MobiSys '16). ACM, New York, NY, USA, 291-304. https://doi.org/10.1145/2906388.2906418" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.261, + 0.482, + 0.312 + ], + "angle": 0, + "content": "[11] Kenny Tsu Wei Choo, Rajesh Krishna Balan, Tan Kiat Wee, Jagmohan Chauhan, Archan Misra, and Youngki Lee. 2017. Empath-D: Empathetic Design for Accessibility. In Proceedings of the 18th International Workshop on Mobile Computing Systems and Applications (HotMobile '17). ACM, New York, NY, USA, 55-60. https://doi.org/10.1145/3032970.3032981" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.312, + 0.482, + 0.362 + ], + "angle": 0, + "content": "[12] Eduardo Cuervo, Alec Wolman, Landon P. Cox, Kiron Lebeck, Ali Razeen, Stefan Saroiu, and Madanlal Musuvathi. 2015. Kahawai: High-Quality Mobile Gaming Using GPU Offload. In Proceedings of the 13th Annual International Conference on Mobile Systems, Applications, and Services (MobiSys '15). ACM, New York, NY, USA, 121-135. 
https://doi.org/10.1145/2742647.2742657" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.362, + 0.482, + 0.382 + ], + "angle": 0, + "content": "[13] Marshall Flax. 2018. Low Vision Simulators. (2018). Retrieved 2018-04-13 from https://www.lowvisionsimulators.com/" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.382, + 0.482, + 0.422 + ], + "angle": 0, + "content": "[14] S. Garrido-Jurado, R. Mu noz Salinas, F.J. Madrid-Cuevas, and M.J. Marin-Jiménez. 2014. Automatic generation and detection of highly reliable fiducial markers under occlusion. Pattern Recognition 47, 6 (2014), 2280-2292. https://doi.org/10.1016/j.patcog.2014.01.005" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.422, + 0.482, + 0.463 + ], + "angle": 0, + "content": "[15] Greg Gay and Cindy Qi Li. 2010. AChecker: Open, Interactive, Customizable, Web Accessibility Checking. In Proceedings of the 2010 International Cross Disciplinary Conference on Web Accessibility (W4A) (W4A '10). ACM, New York, NY, USA, Article 23, 2 pages. https://doi.org/10.1145/1805986.1806019" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.463, + 0.482, + 0.482 + ], + "angle": 0, + "content": "[16] Genymotion. [n. d.]. Genymotion Android Emulator. ([n. d.]). Retrieved 2018-04-13 from https://www.genymotion.com/" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.482, + 0.482, + 0.533 + ], + "angle": 0, + "content": "[17] Shuai Hao, Bin Liu, Suman Nath, William G.J. Halfond, and Ramesh Govindan. 2014. PUMA: Programmable UI-automation for Large-scale Dynamic Analysis of Mobile Apps. In Proceedings of the 12th Annual International Conference on Mobile Systems, Applications, and Services (MobiSys '14). ACM, New York, NY, USA, 204-217. https://doi.org/10.1145/2594368.2594390" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.533, + 0.482, + 0.563 + ], + "angle": 0, + "content": "[18] Sandra G Hart and Lowell E Staveland. 1988. Development of NASA-TLX (Task Load Index): Results of empirical and theoretical research. In Advances in psychology. Vol. 52. Elsevier, 139-183." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.563, + 0.482, + 0.604 + ], + "angle": 0, + "content": "[19] Kazunori Higuchi, Yasuo Sakaguchi, Kazuhiko Sugiyama, and Tomoaki Nakano. 1999. Simulating the human vision of elderly for designing control panels. In Systems, Man, and Cybernetics, 1999. IEEE SMC'99 Conference Proceedings. 1999 IEEE International Conference on, Vol. 5. IEEE, 703-708." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.604, + 0.482, + 0.635 + ], + "angle": 0, + "content": "[20] Intel. 2016. Intel®RealSense™ Camera SR300 Product Specifications. (2016). Retrieved 2018-04-13 from https://ark.intel.com/products/92329/Intel-RealSense-Camera-SR300" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.635, + 0.482, + 0.685 + ], + "angle": 0, + "content": "[21] Zeci Lai, Y. Charlie Hu, Yong Cui, Linhui Sun, and Ningwei Dai. 2017. Furion: Engineering High-Quality Immersive Virtual Reality on Today's Mobile Devices. In Proceedings of the 23rd Annual International Conference on Mobile Computing and Networking (MobiCom '17). ACM, New York, NY, USA, 409-421. https://doi.org/10.1145/3117811.3117815" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.685, + 0.482, + 0.735 + ], + "angle": 0, + "content": "[22] Kyungmin Lee, Jason Flinn, T.J. Giuli, Brian Noble, and Christopher Peplin. 2013. AMC: Verifying User Interface Properties for Vehicular Applications. 
In Proceeding of the 11th Annual International Conference on Mobile Systems, Applications, and Services (MobiSys '13). ACM, New York, NY, USA, 1-12. https://doi.org/10.1145/2462456.2464459" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.735, + 0.482, + 0.766 + ], + "angle": 0, + "content": "[23] Gordon E Legge, Sing-Hang Cheung, Deyue Yu, Susana TL Chung, Hye-Won Lee, and Daniel P Owens. 2007. The case for the visual span as a sensory bottleneck in reading. Journal of Vision 7, 2 (2007), 9-9." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.766, + 0.482, + 0.816 + ], + "angle": 0, + "content": "[24] Bin Liu, Suman Nath, Ramesh Govindan, and Jie Liu. 2014. DECAF: Detecting and Characterizing Ad Fraud in Mobile Apps. In 11th USENIX Symposium on Networked Systems Design and Implementation (NSDI 14). USENIX Association, Seattle, WA, 57-70. https://www.usenix.org/conference/nsdi14/technical-sessions/presentation/liu_bin" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.816, + 0.482, + 0.857 + ], + "angle": 0, + "content": "[25] Aravind Machiry, Rohan Tahiliani, and Mayur Naik. 2013. Dynodroid: An Input Generation System for Android Apps. In Proceedings of the 2013 9th Joint Meeting on Foundations of Software Engineering (ESEC/FSE 2013). ACM, New York, NY, USA, 224-234. https://doi.org/10.1145/2491411.2491450" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.857, + 0.482, + 0.887 + ], + "angle": 0, + "content": "[26] Jennifer Mankoff, Holly Fait, and Ray Juang. 2005. Evaluating accessibility by simulating the experiences of users with vision or motor impairments. IBM Systems Journal 44, 3 (2005), 505-517." + }, + { + "type": "list", + "bbox": [ + 0.087, + 0.109, + 0.484, + 0.887 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.109, + 0.914, + 0.17 + ], + "angle": 0, + "content": "[27] Chulhong Min, Seungchul Lee, Changhun Lee, Youngki Lee, Seungwoo Kang, Seungpyo Choi, Wonjung Kim, and Junehwa Song. 2016. PADA: Power-aware Development Assistant for Mobile Sensing Applications. In Proceedings of the 2016 ACM International Joint Conference on Pervasive and Ubiquitous Computing (UbiComp '16). ACM, New York, NY, USA, 946-957. https://doi.org/10.1145/2971648.2971676" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.17, + 0.914, + 0.231 + ], + "angle": 0, + "content": "[28] Chulhong Min, Youngki Lee, Chungkuk Yoo, Seungwoo Kang, Sangwon Choi, Pillsoon Park, Inseok Hwang, Younghyun Ju, Seungpyo Choi, and Junehwa Song. 2015. PowerForecaster: Predicting Smartphone Power Impact of Continuous Sensing Applications at Pre-installation Time. In Proceedings of the 13th ACM Conference on Embedded Networked Sensor Systems (SenSys '15). ACM, New York, NY, USA, 31-44. https://doi.org/10.1145/2809695.2809728" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.231, + 0.914, + 0.261 + ], + "angle": 0, + "content": "[29] Produkt + Projekt Wolfgang Moll. [n. d.]. Age simulation suit GERT - the GERontic Test suit. ([n. d.]). Retrieved 2018-04-13 from http://www.age-simulation-suit.com/" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.261, + 0.914, + 0.281 + ], + "angle": 0, + "content": "[30] Alan Newell and Peter Gregor. 1988. Human computer interaction for people with disabilities. (1988)." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.281, + 0.914, + 0.312 + ], + "angle": 0, + "content": "[31] Alan F Newell, Peter Gregor, Maggie Morgan, Graham Pullin, and Catriona Macaulay. 2011. User-sensitive inclusive design. 
Universal Access in the Information Society 10, 3 (2011), 235-243." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.312, + 0.914, + 0.342 + ], + "angle": 0, + "content": "[32] Nvidia. 2016. GeForce GTX 1080 Specifications. (2016). Retrieved 2018-04-13 from https://www.geforce.com/hardware/Desktop-gpus/geforce-gtx-1080/ specifications" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.342, + 0.914, + 0.372 + ], + "angle": 0, + "content": "[33] National Institute on Aging. 2016. World's older population grows dramatically. (28 March 2016). Retrieved 2018-04-13 from https://www.nih.gov/news-events/news-releases/worlds-older-population-grows-dramatically" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.372, + 0.914, + 0.392 + ], + "angle": 0, + "content": "[34] DG Pelli, JG Robson, et al. 1988. The design of a new letter chart for measuring contrast sensitivity. In Clinical Vision Sciences. CiteSeer." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.392, + 0.914, + 0.422 + ], + "angle": 0, + "content": "[35] Android Open Source Project. 2017. SurfaceFlinger and HardwareComposer. (March 2017). Retrieved 2018-04-13 from https://source.android.com/devices/ graphics/arch-sf-hwc" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.422, + 0.914, + 0.463 + ], + "angle": 0, + "content": "[36] Vaibhav Rastogi, Yan Chen, and William Enck. 2013. AppsPlayground: Automatic Security Analysis of Smartphone Applications. In Proceedings of the Third ACM Conference on Data and Application Security and Privacy (CODASPY '13). ACM, New York, NY, USA, 209-220. https://doi.org/10.1145/2435349.2435379" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.463, + 0.914, + 0.513 + ], + "angle": 0, + "content": "[37] Lenin Ravindranath, Suman Nath, Jitendra Padhye, and Hari Balakrishnan. 2014. Automatic and Scalable Fault Detection for Mobile Applications. In Proceedings of the 12th Annual International Conference on Mobile Systems, Applications, and Services (MobiSys '14). ACM, New York, NY, USA, 190-203. https://doi.org/10.1145/2594368.2594377" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.513, + 0.914, + 0.543 + ], + "angle": 0, + "content": "[38] IBM Accessibility Research. 2017. IBM Accessibility Checklist 7.0. (18 July 2017). Retrieved 2018-04-13 from http://www-03.ibm.com/able/guidelines/ci162/accessibility_checklist.html" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.543, + 0.914, + 0.573 + ], + "angle": 0, + "content": "[39] Justin B. Rousek, Sonja Koneczny, and M. Susan Hallbeck. 2009. Simulating Visual Impairment to Detect Hospital Wayfinding Difficulties. Proceedings of the Human Factors and Ergonomics Society Annual Meeting 53, 8 (Oct. 2009), 531-535." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.573, + 0.914, + 0.603 + ], + "angle": 0, + "content": "[40] Samsung. 2014. Samsung Galaxy S5 Specifications. (2014). Retrieved 2018-04-13 from http://www.samsung.com/uk/smartphones/galaxy-s5-g900f/SM-G900FZKABTU/" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.603, + 0.914, + 0.643 + ], + "angle": 0, + "content": "[41] Alvy Ray Smith and James F. Blinn. 1996. Blue Screen Matting. In Proceedings of the 23rd Annual Conference on Computer Graphics and Interactive Techniques (SIGGRAPH '96). ACM, New York, NY, USA, 259-268. https://doi.org/10.1145/237170.237263" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.643, + 0.914, + 0.663 + ], + "angle": 0, + "content": "[42] Herman Snellen. 1873. Probebuchstaben zur bestimmung der sehscharfe. 
Vol. 1. H. Peters." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.663, + 0.914, + 0.703 + ], + "angle": 0, + "content": "[43] G. J. Sullivan, J. R. Ohm, W. J. Han, and T. Wiegand. 2012. Overview of the High Efficiency Video Coding (HEVC) Standard. IEEE Transactions on Circuits and Systems for Video Technology 22, 12 (Dec 2012), 1649-1668. https://doi.org/10.1109/TCSVT.2012.2221191" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.703, + 0.914, + 0.723 + ], + "angle": 0, + "content": "[44] Unity Technologies. [n. d.]. Unity. ([n. d.]). Retrieved 2018-04-13 from https://unity3d.com/" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.723, + 0.914, + 0.765 + ], + "angle": 0, + "content": "[45] Sam Tregillus and Eelke Folmer. 2016. VR-STEP: Walking-in-Place Using Inertial Sensing for Hands Free Navigation in Mobile VR Environments. In Proceedings of the 2016 CHI Conference on Human Factors in Computing Systems (CHI '16). ACM, New York, NY, USA, 1250-1255. https://doi.org/10.1145/2858036.2858084" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.764, + 0.914, + 0.795 + ], + "angle": 0, + "content": "[46] Zhou Wang, Alan C Bovik, Hamid R Sheikh, and Eero P Simoncelli. 2004. Image quality assessment: from error visibility to structural similarity. IEEE transactions on image processing 13, 4 (2004), 600-612." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.794, + 0.914, + 0.825 + ], + "angle": 0, + "content": "[47] Fabian Werfel, Roman Wiche, Jochen Feitsch, and Christian Geiger. 2016. Empathizing Audiovisual Sense Impairments: Interactive Real-Time Illustration of Diminished Sense Perception. In Proc. of AH." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.825, + 0.914, + 0.875 + ], + "angle": 0, + "content": "[48] Yu Zhong, Astrid Weber, Casey Burkhardt, Phil Weaver, and Jeffrey P. Bigham. 2015. Enhancing Android Accessibility for Users with Hand Tremor by Reducing Fine Pointing and Steady Tapping. In Proceedings of the 12th Web for All Conference (W4A '15). ACM, New York, NY, USA, Article 29, 10 pages. https://doi.org/10.1145/2745555.2747277" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.874, + 0.914, + 0.896 + ], + "angle": 0, + "content": "[49] George J. Zimmerman. 1979. Zimmerman Low Vision Simulation Kit. (1979). 
Retrieved 2018-04-13 from http://www.lowvisionsimulationkit.com/" + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.109, + 0.914, + 0.896 + ], + "angle": 0, + "content": null + } + ] +] \ No newline at end of file diff --git a/data/2025/2503_12xxx/2503.12933/52570aa5-5dd9-4c45-a08c-c1948885d88d_origin.pdf b/data/2025/2503_12xxx/2503.12933/52570aa5-5dd9-4c45-a08c-c1948885d88d_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..de7eaa20ab9720206ae7cb5a68278e7cc27839f1 --- /dev/null +++ b/data/2025/2503_12xxx/2503.12933/52570aa5-5dd9-4c45-a08c-c1948885d88d_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:652712b520275a0f7ffc619f56020cbc7f5e570f45a5a65bd0dd4384d5e22141 +size 2787992 diff --git a/data/2025/2503_12xxx/2503.12933/full.md b/data/2025/2503_12xxx/2503.12933/full.md new file mode 100644 index 0000000000000000000000000000000000000000..eb79ce42bb0d15c1c654f26b266f15d4ebdd909d --- /dev/null +++ b/data/2025/2503_12xxx/2503.12933/full.md @@ -0,0 +1,478 @@ +# Empath-D: VR-based Empathetic App Design for Accessibility + +Wonjung Kim\* wjkim@nclab.kaist.ac.kr KAIST + +Kenny Tsu Wei Choo kenny.choo.2012@smu.edu.sg Singapore Management University + +Youngki Lee +youngkilee@smu.edu.sg +Singapore Management University + +Archan Misra + +archanm@smu.edu.sg + +Singapore Management University + +Rajesh Krishna Balan + +rajesh@smu.edu.sg + +Singapore Management University + +# ABSTRACT + +With app-based interaction increasingly permeating all aspects of daily living, it is essential to ensure that apps are designed to be inclusive and are usable by a wider audience such as the elderly, with various impairments (e.g., visual, audio and motor). We propose Empath-D, a system that fosters empathetic design, by allowing app designers, in-situ, to rapidly evaluate the usability of their apps, from the perspective of impaired users. To provide a truly authentic experience, Empath-D carefully orchestrates the interaction between a smartphone and a VR device, allowing the user to experience simulated impairments in a virtual world while interacting naturally with the app, using a real smartphone. By carefully orchestrating the VR-smarphone interaction, Empath-D tackles challenges such as preserving low-latency app interaction, accurate visualization of hand movement and low-overhead perturbation of I/O streams. Experimental results show that user interaction with Empath-D is comparable (both in accuracy and user perception) to real-world app usage, and that it can simulate impairment effects as effectively as a custom hardware simulator. + +# CCS CONCEPTS + +- Human-centered computing $\rightarrow$ Systems and tools for interaction design; Ubiquitous and mobile computing systems and tools; Accessibility design and evaluation methods; Accessibility systems and tools; Ubiquitous and mobile computing design and evaluation methods; + +# KEYWORDS + +empathetic design; accessibility; mobile design; virtual reality; multi-device, distributed user interfaces + +# ACM Reference Format: + +Wonjung Kim, Kenny Tsu Wei Choo, Youngki Lee, Archan Misra, and Rajesh Krishna Balan. 2018. Empath-D: VR-based Empathetic App Design for Accessibility. 
In MobiSys '18: The 16th Annual International Conference on Mobile + +*This work was done while the author was on an internship at Singapore Management University + +Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for components of this work owned by others than ACM must be honored. Abstracting with credit is permitted. To copy otherwise, or republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee. Request permissions from permissions@acm.org. + +MobiSys '18, June 10-15, 2018, Munich, Germany + +$\odot$ 2018 Association for Computing Machinery. + +ACM ISBN 978-1-4503-5720-3/18/06...$15.00 + +https://doi.org/10.1145/3210240.3210331 + +![](images/afc2aa0c18463b9d417e5b5637078d0b8fe1fa5610f226957679066ce8a41372.jpg) +Figure 1: Overview of Empath-D + +Systems, Applications, and Services, June 10-15, 2018, Munich, Germany. ACM, New York, NY, USA, 13 pages. https://doi.org/10.1145/3210240.3210331 + +# 1 INTRODUCTION + +Digital interactions have become increasingly commonplace and immersive. We now constantly interact with our personal devices and computing-enhanced ambient objects (such as coffeemakers, home automation systems and digital directories), while engaging in everyday activities, such as commuting, shopping or exercising. Given the ubiquity of such interactions, it is important to ensure that the associated computing interfaces remain accessible to segments of the population, such as the elderly, who suffer from various impairments. The global elderly population is projected to reach $16.7\%$ by 2050 [33], and such users suffer disproportionately from impairments (e.g., vision) that hinder accessibility. + +To support more accessible design, our earlier work [11] introduced the vision of Empath-D, which uses a virtual reality (VR) device to provide mobile application/object designers with a realistic emulation of the interaction experience that impaired users would encounter. In this work, we present the design, implementation and validation of the Empath-D system inspired by this vision. Empath-D's goal is to allow unimpaired application designers to step into the shoes of impaired users and rapidly evaluate the usability of alternative prototypes. While we shall principally focus on empathetic evaluation of mobile applications (apps), Empath-D's design is generic enough to permit emulation of other real-world interactions-e.g., how an elderly user with cataracts and hearing loss would experience a traffic-light controlled pedestrian intersection. + +Empath-D's $^1$ key idea is to present the user with an impairment-augmented view of the smartphone interface (or other digital objects) in a virtual world, while allowing the non-impaired user to perform natural interactions, using a physical smartphone, with a real-world instance of the smartphone app. At a high-level, Empath-D works as follows (see Figure 1): The (unimpaired) user uses a physical smartphone to perform real-world interactions (such as scrolls, taps or gestures) with the app, while wearing a VR device. 
The results of such interactions are projected instantaneously through the I/O interfaces (e.g., screen, speaker) of a 'virtual smartphone' visible in the VR display, but only after those I/O streams have been appropriately degraded by the specified impairment. For example, in Figure 1, the virtual phone's display (and the world view) has been appropriately vignetted, to mimic the experience of a user suffering from glaucoma. + +Key Challenges: To mimic impairments with adequate fidelity and usability, Empath-D must support the following features: + +- Fast, Accurate Multi-device Operation: Empath-D utilizes a split-interaction paradigm: a user interacts with an app using a real-world handheld smartphone, while perceiving (viewing, hearing) the app responses through the VR interface. To faithfully replicate the real-world experience, this split-mode interaction must have tight time coupling and visual fidelity (of the virtual phone's screen), comparable to direct interactions with a standalone smartphone. +- Real-time Tracking: To preserve a user's perception of naturalistic interactions, Empath-D must not only capture explicit phone events, but also mirrors physical actions taken by the user (e.g., swinging the phone around or having one's hand hover over the phone). Thus, Empath-D must also track and render, in real-time, the orientation/location of both the phone and the user's hand within the VR device's field-of-view. +- Lightweight Impairment Execution: To preserve the feel of natural interaction, Empath-D must insert the impairment-specific perturbations into the input/output streams with imperceptible latency or computational overhead (e.g., no reduction in video frame rate). + +Key Contributions: We make the following major contributions: + +- 3-Tier Virtualisation Model: We design a novel 3-tier architecture where (i) the real-world smartphone serves merely as a tracker, forwarding user interaction events (e.g., screen touch and gestures) to a computationally powerful intermediary, after which (ii) the intermediary device perturbs those events by blending in specific input impairments (e.g., hand tremors) and passes them to an app instance running on a smartphone emulator, and finally (iii) the VR device receives the redirected outputs from this app instance and renders an appropriately-impaired (by blending in the output impairments) virtual world, including a virtual smartphone. +Real-time Hand and Phone Tracking: We use an RGB-Depth camera, mounted on the head-worn VR device, to track the outline of a user's hand, and subsequently perform a lightweight but realistic 3-D rendering of the hand on the VR + +display. We also use fiducial marker tracking [14] by the camera to track the position/orientation of the real-world smartphone. We demonstrate our ability to achieve both high-fidelity (pointing error $\leq 5\,mm$ ) and low-latency (end-to-end delays below 120 msec) hand tracking and display. + +- Usability of Virtualized Phone, in Use Environments: We show that Empath-D is not just usable, but that user performance (absent impairments) using Empath-D's virtual smartphone is equivalent to real-world interaction with a smartphone. In addition, we allow usability testing of apps in their use environments, a key enabler for design of mobile applications which may be used anywhere. Our Samsung Gear VR-based prototype has end-to-end latency low-enough (only 96.3 msec of latency, excluding the mobile app emulation) to permit faithful reproduction of direct smartphone usage. 
+- Validation of Impairment Fidelity and Overall System: We implement two distinct vision (glaucoma & cataract) and one audio (high-frequency hearing loss) impairment in our Empath-D prototype. We then conduct a set of studies using the vision impairments, where 12 participants perform a series of standardised activities (e.g., add an alarm), using both our Empath-D prototype (test) and a commercial hardware vision impairment simulator (control) and establish that the performance of users is equivalent across the test and control groups. Finally, we conduct a small-scale study to provide preliminary evidence that our empathetic approach allows developers to design accessible mobile UIs faster and better. + +# 2 THE EMPATH-D VISION + +We use an example to illustrate the use of Empath-D: + +Designing for Visual Impairment. Alice is designing a mobile app that automatically magnifies text from real environments seen through its rear camera to aid people who suffer from cataracts (a condition that dims and blurs vision). Alice starts Empath-D and is presented with a web interface that allows her to customise impairments (e.g., specify the intensity of visual blur). After customising the environment, Alice clicks in the Empath-D web interface to (1) compile the environment to her phone used for VR display (VR-phone)2 and (2) connect an input/output service to a separate phone (IO-phone). She then plugs the VR-phone into the VR headset. + +Alice then compiles her Android app, and runs it in the Android emulator. She puts on the VR headset and holds the IO-phone in her hands. A virtual smartphone (Virt-phone) shows up in VR, tracking the real-world motion of the IO-phone. Alice now navigates through the virtual world, experiencing it as an "impaired user, with cataracts". She holds up IO-phone on a street corner (in the real world), and notices that the magnified text (as seen in the virtual phone in the virtual world) is not clear enough to be legible to a cataract-impaired user. She can now iteratively and rapidly modify her app, recompile it, and execute it in the Android emulator, until she is satisfied with the output. This scenario demonstrates the ease-of-use for Empath-D, with no need for special instrumentation of the app. + +# 3 SYSTEM OVERVIEW + +# 3.1 Design Goals and Implications + +Empath-D has the following key goals, which directly influence the salient implementation choices. + +- Holistic emulation of impairments: For a truly empathetic experience, the app designer must perceive the effects of impairments not just while using the mobile app, but throughout her immersion in the virtual world. Consider a user, suffering from cataract, who is interacting with her smartphone while attending a dimly dit dinner gathering. Simply blurring the phone display, while leaving the background illumination and focus unchanged, might not replicate challenges in visual contrast that an impaired user would face in reality. This requirement precludes the straightforward use of I/O redirection techniques such as Rio [8], which can potentially perturb the I/O streams of only the mobile device. Instead, the impairment must be applied holistically, to the entire virtual world. +- Realistic emulation of smartphone and mobile apps in the virtual world: Empath-D aims at realistically emulating mobile apps within the virtual world rendered by a commodity VR headset. Realistic emulation of mobile apps imposes two requirements. 
(a) First, the virtual smartphone should have sufficient visual resolution, corresponding to typical usage where the smartphone is held $\approx 30\mathrm{cm}$ away from the eye. We shall see (in Section 6.3) that this requirement, coupled with differences in display resolutions between smartphones and VR devices, requires careful magnification of the virtual smartphone to provide legibility without hampering usage fidelity. (b) Second, the user should not perceive any lag between her user input and the rendered view of the app, seen through the VR device. Quantitatively, we thus require that the task completion time, experienced by a user interacting with the emulated application in the virtual world, should be comparable to real-world app usage on a real smartphone. +- Use of unmodified app For easy and low-overhead adoption by app designers, Empath-D should support the emulation of mobile applications using the original, unmodified binaries (e.g., .apk for Android). Empath-D's requirement to support empathetic emulation without app modifications implies that app designers would be able to adopt Empath-D with minimal impact to existing development practices. +- Low-latency, accurate finger tracking: This goal is an extension of the holistic emulation objective. In the real-world, users utilise instantaneous visual feedback and proprioception to move their fingers around the smartphone display, even when they are hovering but not actually touching the display. To ensure consistency between the user's tactile, visual and proprioceptive perceptions of her hand movement, Empath-D should also realistically render, in the virtual world, the user's hand movements and any changes in the position/orientation of the real-world smartphone, without any perceptible lag. In Section 6, we shall see how the Empath-D implementation meets these stringent performance bounds. + +![](images/bd83a371d793f92d12e8cfe908d31d13c777cba3ad2142f6936b839ce6a0da4b.jpg) +Figure 2:Empath- $D$ architecture + +- Light-weight, effective emulation of impairments: Empath-D will need to emulate impairments, at different levels of severity. For high-fidelity empathetic emulation, the insertion of such impairments in the I/O streams of the smartphone should not add generate any additional artefacts (e.g., increased latency, reduction in display refresh rate, etc.). + +# 3.2 System Overview + +We now present the overview of the Empath-D system (illustrated in Figure 2). + +Using Empath-D in VR. To immersively evaluate the application, the developer (or the tester) starts by installing her developed application binaries (i.e., Android .apkss) to run on the emulated smartphone. The developer then adjusts the profile settings for the impairment using Empath-D's web dashboard and selects a use case scenario (e.g., in office, in the street, etc.). She holds her physical smartphone and puts on the VR headset, earphones (when hearing impairments are involved) and experiences the immersive reality (where she can use the app - now mapped onto the physical smartphone - with the configured impairment under the designated use case scenario) that Empath-D generates. She then tests out various interfaces and functionalities of the app in the immersive VR environments. + +Components of Empath-D. Empath-D runs across three different physical devices: a physical smartphone, a computer, and a VR device (see Figure 2). + +Smartphone: In Empath-D, the user interacts with the app using a real smartphone held in her hand. 
Interestingly, this smartphone does not run the app itself, but functions as a tracking device, helping to preserve the user's realistic sense of smartphone interaction. The smartphone simply redirects the user interaction events (e.g., touch events such as clicks and swipes on the display, and motion events captured by inertial sensors) to the computer, which is in charge of the app emulation. This smartphone also displays a fiducial marker array [14] on its display, to help in efficient, real-time tracking of the phone's location.

Computer: The computer is at the heart of Empath-D's ability to fuse the real and virtual world. It consists of two major components, the Phone and Hand Tracker and the Mobile Emulator, as well as a Web Dashboard (see Figure 6), which allows the user to select the impairment profile to be applied. In addition, as we shall discuss shortly, this computer may run an Impairment Generator cum Virtual World Renderer. Key functions include:

- The Phone and Hand Tracker uses images captured by the VR headset-mounted camera to track the position and pose of the smartphone (relative to the VR device), and to create the virtual phone image at the correct position in the virtual world. It also uses the same camera to track the user's hand, as it interacts with the smartphone, and then renders it in the virtual world.
- The Mobile Emulator executes the app being tested, using the redirected stream of user interaction events transmitted by the smartphone. The resulting visual output of the app is then transmitted as a sequence of images to the VR device, where these images are integrated into the virtual phone object; likewise, audio output (if any) is directly streamed to the VR device.

The overall Empath-D framework includes an Impairment Generator that is typically applied as one or more filters over the Virtual World Renderer (an engine such as Unity [44]), which is responsible for combining the various virtual objects and rendering the virtual world. The Impairment Generator effectively perturbs/modifies the audio/video feeds of the virtual world before they are displayed on the VR device. For example, to emulate cataracts, it applies an appropriate 'blurring/dimming' filter on the video feed; similarly, to emulate high-frequency hearing loss (an audio impairment), this generator applies a low-pass filter on the output audio stream. These two components are placed inside a dotted-line rectangle in Figure 2, to reflect the reality that they run on either the computer or the VR device, depending on whether the VR device is tethered or not. In untethered VR devices (such as the Samsung Gear VR), the Impairment Generator and the Virtual World Renderer run on the VR device itself. In contrast, with tethered devices such as the HTC Vive, these components run on the computer, which typically offers higher graphics quality, higher frame rates, and faster execution.

VR Device: Finally, the VR device is used to display the synthesised virtual world to the user. This synthesis involves the fusion of the virtual smartphone, the user's hand and the ambient virtual world, all subject to the impairment filter.

# 4 VR-BASED EMULATION OF MOBILE INTERACTION

Empath-D follows a split-interaction paradigm: for realistic immersion, Empath-D renders the visual and audio output of the target app in the virtual world (i.e., via the VR headset's display and speakers), while allowing the user to interact naturalistically with a real-world physical phone.
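To make this split-interaction paradigm concrete, the sketch below shows one way the redirected touch events could be replayed against the Android emulator on the computer. The line-delimited JSON protocol, port number, and adb-based injection are our own illustrative assumptions, not Empath-D's actual implementation.

```python
import json
import socket
import subprocess

# Hypothetical receiver for the IO-phone's event stream: the phone sends one
# JSON-encoded touch event per line over TCP, and we replay each event on the
# Android emulator via `adb shell input`.
HOST, PORT = "0.0.0.0", 5555  # assumed address; not from the paper

def replay_touch(event: dict) -> None:
    """Inject a redirected touch event into the emulator."""
    if event["type"] == "tap":
        subprocess.run(["adb", "-e", "shell", "input", "tap",
                        str(event["x"]), str(event["y"])], check=True)
    elif event["type"] == "swipe":
        subprocess.run(["adb", "-e", "shell", "input", "swipe",
                        str(event["x1"]), str(event["y1"]),
                        str(event["x2"]), str(event["y2"])], check=True)

with socket.create_server((HOST, PORT)) as server:
    conn, _ = server.accept()
    with conn, conn.makefile("r") as stream:
        for line in stream:
            replay_touch(json.loads(line))
```

In the real system, motion (inertial) events are redirected alongside touch events, and the emulator is driven directly rather than through a shell tool; the sketch only illustrates the direction of the data flow.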
A major challenge in this paradigm is to enable natural, low-latency tracking and display of the real-world motion of both the phone and the user's hands, so as to ensure consistency across the user's visual, tactile and proprioceptive experience. We achieve this by performing three distinct steps: (a) smartphone tracking, (b) hand tracking, and (c) hand rendering in VR, using an RGB-Depth (RGB-D) camera mounted on the VR headset. Empath-D first tracks the position and orientation of the physical smartphone and synchronises the position of the virtual phone to the physical smartphone (see Section 4.1). Separately, Empath-D also captures the fingers in the real world and displays them at the correct position (relative to the virtual smartphone) in the virtual world (see Sections 4.2 and 4.3).

![](images/2d0c925cd5b5c961e0517affb584f4170fe5ce6aae91f40748c0cd5792f14f77.jpg)
Figure 3: Tracking the physical phone with fiducial markers

Empath-D uses the headset-mounted RGB-D camera to capture the colour image along with the depth values, relative to the camera. The camera's position is fixed relative to the user's head. Its three axes are thus aligned to the user's head: the $z$-axis to the user's forward (gaze) direction, and the $x$ and $y$ axes capturing the horizontal and vertical displacement.

# 4.1 Tracking the physical smartphone

Empath-D uses fiducial markers, displayed on the physical smartphone's screen, to localise the smartphone efficiently. It takes a colour image as input, and returns the phone's transformation relative to the RGB-D camera's coordinate system: translation and rotation, i.e., x, y, z, roll, pitch, yaw. We employ a technique proposed and detailed in [14].

The Empath-D Hand Tracker component tracks the physical phone using markers captured by the camera. Each marker, displayed on the phone screen, has a distinct pattern. The tracker knows the position of each marker (e.g., top-left, top-right, bottom-left and bottom-right) in the physical smartphone screen's coordinate system. The system first detects these markers in a given colour image, identifying them based on their unique patterns (see Figure 3). In particular, the system recognises the coordinates of each of the four corners of each marker. Moreover, the system knows the true size of, and separation between, the markers. It then uses an object pose estimation algorithm (provided by OpenCV's solvePnP function [6]), along with the array of fiducial marker points, to compute the 3-D position and orientation of the smartphone.

Algorithm 1 Hand Segmentation
1: Input: $T\gets$ Phone's translation (3-D vector)
2: Input: $R\gets$ Phone's orientation ($3\times 3$ rotation matrix)
3: Input: $F\gets$ RGB-D frame, a 2-D array in which each entry $F_{i,j}$ holds a colour value and a 3-D position relative to the camera
4: Input: $V\gets$ 3-D region of interest (relative to the phone)
5: Output: fgMask, a 2-D boolean array whose dimensions equal those of $F$
6:
7: fgMask[i,j] $\leftarrow$ false for all $(i,j)$
8: for point $(i,j)$ in $F$ do
9:   if $(i,j)$ in screen_border then
10:    /* Case A: Blue background segmentation */
11:    fgMask[i,j] $\leftarrow 1-\mathrm{Blue}(F_{i,j}) + 0.5\cdot \mathrm{Red}(F_{i,j}) > \tau$
12:  else
13:    /* Case B: Depth-based segmentation */
14:    $pos_{phone} \leftarrow R^{-1}\cdot (\mathrm{Position}(F_{i,j}) - T)$
15:    fgMask[i,j] $\leftarrow$ ($pos_{phone} \in V$)
16:  end if
17: end for
18: return fgMask

Past results [14] show that this technique can compute an object's position and orientation with sub-cm accuracy.

This fiducial marker-based algorithm would fail under two conditions: (a) when the markers are occluded by the user's hand, and (b) if the ambient illumination levels are too low or too high, reducing the contrast of the markers. To tackle (a), the smartphone screen displays an entire array of markers across the screen, thereby ensuring correct smartphone tracking as long as some part of the phone is visible. Contrast concerns are not particularly relevant in our scenario, as we assume that the user is testing the app in a regularly lit work/office environment.

# 4.2 Hand Segmentation

Empath-D uses the frames captured by the RGB-D camera to track and segment the user's hand. For each frame, we extract the segment (polygon of pixels) that represents the user's hand, and render that segment in the virtual world. As the goal of hand tracking is to provide the user with a natural view of her smartphone interactions, we restrict the tracking to a 3-D region of interest (ROI) that is centred at the phone, with a depth of $2cm$ and a planar boundary of $6cm$. In other words, we only track the hand while it is $\leq 2cm$ away from the smartphone screen, and within $\leq 6cm$ of the smartphone edges.

A straightforward approach is to apply a depth-based segmentation strategy, where we first isolate only the foreground points which lie within a depth of $2cm$ of the smartphone surface. However, we empirically observed that, due to the glossy surface of the smartphone, such depth estimation was inaccurate for points located on the smartphone's screen. Accordingly, we implemented two separate segmentation methods (detailed in Algorithm 1): (case A) a colour-based segmentation approach to identify points which are directly over the smartphone, and (case B) a depth-based approach to identify points which are near, but not over, the smartphone's screen. We apply the colour-based segmentation to the points inside the screen's border (thick orange contour in Figure 3) and the depth-based approach to the points outside.

![](images/f48189648e06342f7107918350098ce5353869f4381a97f5a5c27617bfa2b7bc.jpg)
Figure 4: Mesh of hand

![](images/b3e9e489695a4a9946ad9fb6d24d204879d2ca00e03d4c16e01c599e24768049.jpg)
Figure 5: Empath-D hand segmentation

Colour-based segmentation: We adopt the colour-based technique proposed in [41]. The approach tests RGB values to segment the foreground (hand) from a background coloured in blue. In our scenario, we target human skin as the foreground. Human skin has a property common to all skin tones: its R value is about twice the value of G and B ($R \approx 2G \approx 2B$). Given this property, we obtain a formula that discriminates the foreground from a background whose $B$ value is 1 (line 11 in Algorithm 1); $\tau$ is a user-tunable threshold, which allows the test to adapt to different lighting conditions.
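To make the two-case test concrete, the following NumPy sketch mirrors Algorithm 1 under assumptions of our own: colour channels normalised to [0, 1], a per-pixel 3-D position map from the depth camera, and an axis-aligned box as the region of interest $V$.

```python
import numpy as np

def segment_hand(color, points, on_screen, R, T, roi=(0.06, 0.06, 0.02), tau=0.5):
    """Two-case hand segmentation in the spirit of Algorithm 1.

    color:     HxWx3 float array, RGB normalised to [0, 1]
    points:    HxWx3 float array, per-pixel 3-D position in camera coordinates (m)
    on_screen: HxW bool mask, True for pixels inside the screen border
    R, T:      phone orientation (3x3) and translation (3,) from the phone tracker
    roi:       assumed half-extents (m) of the box-shaped region of interest V
    tau:       user-tunable colour threshold (line 11 of Algorithm 1)
    """
    r, b = color[..., 0], color[..., 2]
    # Case A: blue-background test over the screen (1 - B + 0.5*R > tau)
    case_a = (1.0 - b + 0.5 * r) > tau
    # Case B: depth test; for orthonormal R, (p - T) @ R equals R^{-1} (p - T)
    pos_phone = (points - T) @ R
    case_b = np.all(np.abs(pos_phone) <= np.asarray(roi), axis=-1)
    return np.where(on_screen, case_a, case_b)
```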
However, note that, to enable tracking of the phone, the phone's screen cannot be completely blue: it must also contain the array of fiducial markers. We satisfy both requirements simultaneously by using blue ($R = 0$, $G = 0$, $B = 1$) to colour the markers, over a cyan ($R = 0$, $G = 1$, $B = 1$) background. Only the $G$ value differs between the two, and $G$ is unused in the colour-based segmentation.

Points outside the smartphone's screen are segmented using the depth-based approach. After identifying the points corresponding to the user's hand, the system translates these points to 3-D coordinates in the camera's coordinate system, using the associated depth values.

# 4.3 Rendering the hand in the virtual world

After detecting the hand segment, the Empath-D system renders it in the virtual world. The system passes the tracked hands to the Virtual World Renderer, sharing (i) the 3-D structure of the hands (surface mesh), (ii) the colour image of the RGB-D frame (texture), and (iii) the mapping between the surface mesh and the colour image (UV map). In common rendering engines (e.g., Unity), the 3-D structure of the hand is represented by a triangle mesh, i.e., a set of vertices constituting individual small triangles. The mesh is rendered at the same location as the user's hand in the real world. As the user's hand is localised in the coordinates of the RGB-D camera, the location is offset by an additional depth value (7cm in our implementation), to reflect the additional distance between the centre of the user's eyes and the depth camera. An important characteristic of our algorithm is that we render the actual image of the user's hands over this triangle mesh. Figure 4 illustrates the Delaunay triangulation of a set of points. The mesh is combined with the hand's image (Figure 5), and rendered in the VR display. Extracting and rendering the actual image of the user's finger enhances the immersive feeling of real-life smartphone navigation in the virtual world.

The complexity of the mesh, i.e., the number of vertices (or triangles) in the rendered hand, is an important parameter in the rendering process. A larger number of vertices captures the contours of the hand more precisely, resulting in a more life-like image. However, this also results in added latency in the rendering engine. To support the twin objectives of low-latency and life-like rendering, we utilise a sub-sampling technique to construct the mesh. Specifically, Empath-D preserves all the points on the edges of the segment, to preserve the precise contours of the hand. However, it performs a 32-fold downsampling of the interior points (prior to constructing the Delaunay triangulation), along both the row and column axes, to reduce the computational time significantly without materially affecting the reconstructed hand image. We shall show, in Section 6, how our prototype Empath-D implementation uses this technique to achieve our twin objectives.

# 5 IMPAIRMENT SIMULATION

Empath-D aims to enable evaluation of the usability of app designs under visual, auditory and haptic impairment simulation. Realistic simulation of various impairments in the VR world is essential to achieving this goal.
There has been a thread of research on simulating impairments through physical simulator devices [1, 13, 29, 39, 49]. For instance, Zimmerman et al. use goggles and enclosing materials to simulate low-vision impairments [49]. These hardware simulators generalise the impairment of interest: they simulate specific aspects of the impairment pathology rather than emulating exactly how an impairment manifests. However, impairments can vary greatly between individuals. For instance, glaucoma generally progresses as a deterioration from the periphery towards the centre of vision, but in reality it comes in different shapes and severities, affecting the usability of applications in different ways. Existing physical impairment simulators simply approximate this as a central circle of clarity, with blur extending through to the periphery. Empath-D is advantageous over existing physical simulators in the following ways: it allows 1) impairments to be customised, 2) simultaneous manifestation of multiple impairments, and 3) new impairments to be added easily. Figure 6 shows the web interface for designers to customise impairments for the target user group.

![](images/b377111a3890b5b21d82f0ce87f6713e12d99dbe61914f2f616d4f3b1f803405.jpg)
Figure 6: Screenshot of Empath-D impairment configuration dashboard

![](images/11e5b5ede7aadf764283bae50216204001ff4629a008d7bccfe665359f2a217c.jpg)
Figure 7: Simulated cataract (left) and simulated glaucoma (right)

# 5.1 Simulating Visual Impairments

Vision is the dominant sensory system by which humans perceive the world, and is a key focus for Empath-D. Vision impairment is one of the most common causes of accessibility problems that come with age. Common vision impairments include cataracts, glaucoma, and age-related macular degeneration. Such vision impairments present as reduced visual acuity, loss of central/peripheral vision, or decreased contrast sensitivity. These symptoms are widely documented to affect interaction with various desktop and mobile applications; for example, humans use peripheral vision to pre-scan text ahead of their point of focus, and as peripheral vision narrows, this scanning becomes less effective, which slows reading [23]. In this work, we examine and simulate two commonly found visual impairments: cataracts and glaucoma.

Our approach is to apply an image effect at the "eye" (i.e., a camera pair of view renderers) of the VR scene. From this camera pair, the image effect applies to all other objects in the scene (e.g., smartphone, fingers, scene), just as an impaired user would experience it. We employed various image filters for the different impairments, which 1) render impairments realistically enough to help designers identify usability issues and take corrective action, and 2) have a computational overhead small enough not to add noticeable delays to the emulation.

The approach is flexible and lightweight: the intensity of each impairment simulator is configurable at runtime, and the image effects are applied at the last stage of the rendering pipeline. Glaucoma presents functionally as a loss of peripheral vision. To simulate glaucoma, we use a vignette with a clear inner circle, a blurred region between the inner and outer circles, and black extending outwards from the outer circle (see Figure 7). Cataracts present functionally as reduced visual acuity and reduced contrast sensitivity. We use a blur filter to simulate reduced visual acuity, and a contrast reduction filter to simulate reduced contrast sensitivity (see Figure 7).
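In our prototype these effects are realised as shaders in the rendering engine (see Section 6.5). Purely for illustration, a rough offline approximation of the two filters can be written with NumPy and OpenCV as follows; the intensity parameterisations are our own assumptions, not the paper's calibrated values.

```python
import cv2
import numpy as np

def cataract(frame, blur=0.1, contrast=0.5):
    """Approximate cataract: Gaussian blur plus contrast reduction."""
    k = max(1, int(blur * 50)) | 1                # odd blur kernel from intensity
    out = cv2.GaussianBlur(frame, (k, k), 0)
    mean = out.mean(axis=(0, 1), keepdims=True)   # pull pixels towards the mean
    return (mean + contrast * (out - mean)).astype(frame.dtype)

def glaucoma(frame, inner=0.25, outer=0.45):
    """Approximate glaucoma: clear inner circle, fading to black outwards
    (the actual filter blurs, rather than fades, the intermediate ring)."""
    h, w = frame.shape[:2]
    yy, xx = np.mgrid[0:h, 0:w]
    d = np.hypot(xx - w / 2, yy - h / 2) / np.hypot(w / 2, h / 2)
    vignette = np.clip((outer - d) / (outer - inner), 0.0, 1.0)
    return (frame * vignette[..., None]).astype(frame.dtype)
```

A shader version applies the same per-pixel arithmetic at the last stage of the rendering pipeline, which is what keeps the runtime overhead small.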
Table 1: Hardware of Empath-D

| Component | Hardware |
| --- | --- |
| VR headset | Samsung Gear VR [5] |
| VR smartphone | Samsung Galaxy S7 [4] |
| RGB-D camera | Intel RealSense SR300 [20] |
| PC | CPU: 4 cores, 3.4 GHz; RAM: 16 GB; GPU: GeForce GTX 1080 [32] |
| Physical IO smartphone | Samsung Galaxy S5 [40] |
The functional aspects of vision impairments are straightforward to create in VR, which gives Empath-D high extensibility for implementing other types of visual impairments. While we have just described the two impairments pertaining to our studies, it is easy to create others, such as colour filters to simulate colour blindness. However, we leave the effect of eye movements on impairments as future work; since eye tracking is currently not supported in Empath-D, a user needs to move her head to achieve the same effect.

# 5.2 Simulating Other Modalities

We discuss how other modalities may be simulated in Empath-D.

Hand Tremors. Hand tremors, a common symptom of Parkinson's disease and essential tremor, make it hard to point precisely on a touchscreen. A hand tremor may be characterised by the frequency and amplitude of oscillatory movement. Since we present a virtual representation of the user's hand (i.e., as a 3-D mesh) to enable interaction with the virtual mobile phone, Empath-D can similarly perturb this 3-D mesh in VR to create hand tremors. While the user would not physically experience the hand movement, the visual perturbation would be sufficient to hinder accurate touch, thereby simulating hand tremors.

Hearing Loss. High-frequency hearing loss is common among the elderly population. People diagnosed with high-frequency hearing loss are unable to hear sounds between $2,000\mathrm{Hz}$ and $8,000\mathrm{Hz}$, and often struggle to understand or keep up with daily conversations (missing consonants in higher registers, such as the letters F and S, or female voices). Empath-D applies a band-stop filter over the output sound of the target application to attenuate the signals between $2\mathrm{kHz}$ and $8\mathrm{kHz}$, and plays the filtered audio feed through the VR device.

# 6 IMPLEMENTATION

# 6.1 Hardware

We implemented our current Empath-D prototype using the hardware described in Table 1. We used the Samsung Gear VR fitted with the Samsung Galaxy S7 as the VR headset. We used the Intel RealSense SR300 RGB-D camera for finger tracking, selecting it among the alternatives because: 1) its small size and low weight allowed us to easily attach it to the VR headset, and 2) its minimum sensing range is low enough to permit hand tracking at a distance of $30\mathrm{cm}$. We employed the Samsung Galaxy S5 as the physical I/O device, and a powerful laptop (4-core 3.4 GHz CPU, 16GB RAM) as the intermediary device. The choice of the VR headset itself was deliberate: we chose the Samsung Gear VR (an untethered, smartphone-powered VR device) over more powerful PC-tethered VR devices such as the HTC Vive or Oculus Rift. This was mainly because PC-tethered devices such as the HTC Vive use IR lasers to localise the headset, which interfere with the IR emitter of the RGB-D camera used for depth sensing in hand tracking.

![](images/1dcb64b45b7edead9f0980680ddcb16e04ceb7b4ff22f89c3ac8e10075db7d63.jpg)
Figure 8: Rendering frame rate under varying virtual display resolution (width : height = 9 : 16; the default resolution of the Android emulator is 1080x1920)

# 6.2 Rendering an Emulated App

We used empirical studies to determine an appropriate screen resolution and frame rate for rendering the emulated app (and the smartphone) in the VR headset. Empath-D obtains screenshots of its mobile emulator using the Android virtual display [35] and transmits these screenshots over WiFi to the Gear VR device.
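A minimal sketch of this capture-and-stream loop is shown below, including the change-detection optimisation discussed next; the adb-based capture and the length-prefixed framing are our own stand-ins for the paper's virtual-display capture and transport.

```python
import hashlib
import socket
import subprocess

def grab_screenshot() -> bytes:
    """Grab one emulator frame as PNG (stand-in for virtual-display capture)."""
    return subprocess.run(["adb", "-e", "exec-out", "screencap", "-p"],
                          capture_output=True, check=True).stdout

def stream_frames(vr_addr=("192.168.0.42", 6000)):  # assumed VR device address
    """Send a length-prefixed frame only when the screen content changes."""
    last_digest = None
    with socket.create_connection(vr_addr) as sock:
        while True:
            frame = grab_screenshot()
            digest = hashlib.sha256(frame).digest()
            if digest != last_digest:              # differential transmission
                sock.sendall(len(frame).to_bytes(4, "big") + frame)
                last_digest = digest
```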
The overhead of transmitting and rendering these emulated screenshots is proportional to their resolution. The default 1080p resolution could sustain a frame rate of only 18 fps, which causes visible jerkiness. To reduce this overhead, we reduced the resolution (using the setDisplayProjection() method) and applied differential transmission, sending a screenshot only when the emulated app's display changes.

Figure 8 shows the experimental results on the tradeoff between resolution and rendering frame rate, obtained while playing a video to ensure continuous change of the screen content. The frame rate saturates at $57~\text{fps}$, at a screen resolution of $485\times 863$. Moreover, through another user study (described next) to understand the minimum resolution needed to read an app's contents, we empirically verified that the participants had no issues reading the app's content at a resolution of $485\times 863$. Hence, we chose this resolution as our default, although this setting can be modified (e.g., we can pick a higher resolution, and a lower frame rate, for an app with mostly static content).

If Empath-D displays the virtual smartphone at its original size in the virtual world (in portrait position), its display becomes illegible. For example, the Samsung Galaxy S7 (in the Gear VR) has a resolution of $2560 \times 1440$ and an $\approx 101^{\circ}$ horizontal field of view, yielding a horizontal pixel density of $\approx 25.3$ pixels/degree. When a virtual phone is held $30\mathrm{cm}$ away, the horizontal pixel density drops below 25.3 pixels/degree due to downsampling of the virtual phone screen as seen through the VR display. This presents a problem for viewing the content of the virtual phone (in particular, text), as its pixel density is significantly lower than when viewing a physical phone; for instance, the Galaxy S5 gives $\approx 89.4$ pixels/degree at a $30\mathrm{cm}$ distance.

![](images/7dc207ba8205ccdb334a933ebb68c6cfbb0d9d987fb5854960854ec9b06f67ca.jpg)
Figure 9: Readable font size of the virtual smartphone at each magnification ratio

We tackle this issue by scaling up the virtual phone's size by a factor that ensures that the phone's display text remains legible. To determine this factor, we recruited three participants and asked them to record the minimum readable font sizes, while showing them a virtual smartphone (at a distance of $30~\mathrm{cm}$) at various magnification ratios (increasing in steps of 0.1 from 1.0 to 2.7). Figure 9 shows that participants could read text with font size = 12sp (the commonly used minimum font size for mobile apps) for magnification factors $\geq 1.5$. Accordingly, we used 1.5 as the default magnification ratio for the smartphone and its display. We also proportionately scaled up the user's rendered hand. User studies (Section 7) show that users found this configuration highly usable.

# 6.3 Rendering the Virtual Hand

As discussed in Section 4.3, the rendering latency of the virtual hand is proportional to the number of vertices in the Delaunay triangulation-based mesh. To reduce this latency, we apply a non-uniform sampling approach: Empath-D preserves all the points on the edges of the segment, to preserve the precise contours of the hand, but downsamples the interior points (prior to constructing the Delaunay triangulation) along both the $x$ and $y$ axes, reducing the computational time significantly without materially affecting the reconstructed hand image.
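A minimal 2-D sketch of this edge-preserving subsampling and triangulation is given below; the real pipeline lifts the vertices to 3-D using the depth values and attaches a UV map, and the SciPy/OpenCV usage here is our own illustration.

```python
import cv2
import numpy as np
from scipy.spatial import Delaunay

def hand_mesh(mask, step=32):
    """Triangulate a hand-segmentation mask, keeping all contour pixels but
    only every `step`-th interior pixel along each axis (step=32 mirrors the
    32-fold downsampling described in the text)."""
    contours, _ = cv2.findContours(mask.astype(np.uint8), cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_NONE)   # OpenCV 4 API
    edge_pts = np.vstack([c.reshape(-1, 2) for c in contours])
    ys, xs = np.nonzero(mask)
    keep = (ys % step == 0) & (xs % step == 0)    # grid-subsample the interior
    interior_pts = np.stack([xs[keep], ys[keep]], axis=1)
    pts = np.unique(np.vstack([edge_pts, interior_pts]), axis=0)
    tri = Delaunay(pts)                           # 2-D Delaunay triangulation
    return pts, tri.simplices                     # vertices + triangle indices
```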
We empirically determined the sampling rate $X$ by varying $X$ and measuring both (i) the processing latency and (ii) the SSIM [12, 46] (Structural SIMilarity, a metric of perceived image quality) of the hand images, using 200 RGB-D frames. Figure 10 shows the results. Without any subsampling ($X = 0\%$), the rendering latency is 311.1 msec, which is too high for our responsiveness goal. We therefore downsample the internal pixels by a factor of 32 along each axis ($X = 99.9\%$), i.e., choosing every $32^{nd}$ pixel on the grid. This results in a latency of 26.9 msec, while keeping the SSIM at 0.976, a level indistinguishable from the original as perceived by a human.

# 6.4 Environment Emulation

To enable holistic evaluation of app interactions, Empath-D emulates not just the virtual phone, but the entire virtual world as well. In our current implementation, we emulated a crowded Urban Street environment, which includes crosswalks, traffic lights, pedestrians and commonplace roadside obstacles.

![](images/ed586f433128be2511fc2fdefb49a6de472fe84bdb9ff54f955321bebd4e5d7b.jpg)
Figure 10: Rendering latency vs. image quality of the virtual hand

To further mimic real-world movement, our implementation allows the user to navigate the virtual world by (i) rotating her head (using the head-tracking ability of the VR device), and (ii) 'walking in place', using the technique proposed in [45], as this does not require any additional hardware on the VR device.

# 6.5 VR Manager

This component currently executes on the VR smartphone, and is responsible for combining and rendering the output of the various components (Hand Tracker, Phone Tracker and Virtual Phone) in the virtual world. It is implemented as a Unity application, and is also responsible for applying the impairments to the output of the virtual world; the image effects simulating low-vision impairments are implemented as Unity shader scripts.

# 7 EVALUATION

We now present a mix of system and user experiments to evaluate the performance and efficacy of our Empath-D implementation. Besides micro-benchmark studies, we conducted two experiments to capture user interaction with Empath-D. In Experiment 1, we examine the performance of Empath-D vs. a real-world smartphone, in the absence of any impairments. In Experiment 2, we consider an impairment-augmented version of Empath-D, comparing the performance of users against the use of commercial impairment simulation hardware.

# 7.1 Micro-benchmark Performance of Empath-D

We measured the overall latency of Empath-D, both in terms of the delay in reflecting touch interactions in the virtual world and in terms of the hand-tracking delay.

7.1.1 End-to-end Latency of Touch Interaction. As a measure of the overall responsiveness of Empath-D, we computed the latency between a touch input on the physical smartphone and the resulting change in the content of the virtual smartphone, rendered in the VR display. To measure this, we utilised a high-framerate camera (operating at 240 fps) to concurrently record both the screen of the physical smartphone and the virtual phone (displayed in the VR). The phone screen is coloured green initially, and was programmed to turn red as soon as it received a touch input.
We repeated the measurement 23 times, capturing (via the video frames) the time gap between (i) the physical smartphone screen turning red and (ii) the virtual smartphone turning red in the VR display. The end-to-end latency is 237.70 msec ($SD = 20.43$).

![](images/f4fadf609d2c2cfe536a1d5dee2467428b592246da75a7f0e69d720eb5161084.jpg)
Figure 11: Overhead of impairment simulation

By monitoring the intermediary computer, we obtained the breakdown of this delay: (i) smartphone responsiveness (the time from the user touching the screen till the phone transmits the touch event to the computer) $= 0.3$ msec ($SD = 0.16$); (ii) computer emulation responsiveness (the time from receiving the touch event till the screenshot of the modified display is sent to the VR device) $= 141.37$ msec ($SD = 6.6$); and (iii) VR responsiveness (the time from receiving the screenshot till it is rendered on the VR display) $= 10.46$ msec ($SD = 8.36$). The remaining latency ($\approx 87$ msec) can be attributed to WiFi network latency. These micro-measurements suggest that the default Android emulator used in our studies was the dominant component of the latency. The default Android emulator is known to be fairly slow, and multiple third-party emulators (e.g., Genymotion [16]) are reported to provide significantly lower latency. Accordingly, we anticipate that this overall latency can be reduced to $\leq 150$ msec without any significant architectural modification of Empath-D.

7.1.2 End-to-end Latency of Virtual Hand. We also evaluated the latency between the physical movement of the user's hand and the rendering of this movement in the VR display. To capture this time difference, we displayed a small circle, at a specific point on the display, on both the smartphone and the virtual phone. Users were instructed to swipe a finger on the screen to reach the circle. We measured, over 20 experiments, the time (number of frames from the previously used high-framerate camera) between the occlusion of the circle on the physical phone and the resulting occlusion on the virtual phone, computing an average latency of $117.46$ msec ($SD = 20.44$). Additionally, we measured the component delays of this rendering process as: (i) reading an RGB-D frame: $4.90$ msec ($SD = 0.58$); (ii) phone tracking: $4.56$ msec ($SD = 0.25$); (iii) hand tracking: $8.0$ msec ($SD = 1.58$); and (iv) VR responsiveness (the time from receiving the hand mesh till it is rendered on the VR display): $26.99$ msec ($SD = 5.22$). The remaining latency, attributable to the WiFi network, is $\approx 73$ msec, consistent with the measurements reported above.

# 7.2 Study Design for Usability Experiments

We then conducted user studies on the usability and real-world fidelity of our Empath-D implementation. The user study (approved by our institution's IRB) consisted of 12 users (9 males) with no pre-existing uncorrected vision impairments. Users were aged 24-39, with a mean age of 30.3 years ($\mathrm{SD} = 5$).

Table 2: Study Tasks and Conditions in Experiment 1
| Task | Condition | Impairment | Simulator Type | Environment |
| --- | --- | --- | --- | --- |
| T1-T4 | A | none | none | Real |
| | B | Cataracts | Physical | Real |
| | C | none | none | Virtual |
| | D | Cataracts | Virtual | Virtual |
| | E | Glaucoma | Physical | Real |
| | F | Glaucoma | Virtual | Virtual |
Table 3: Smartphone Interaction Tasks in Experiment 1
| Task Type | Task Code | Task Description |
| --- | --- | --- |
| Everyday Phone Use | T1 | Perform a Calculation |
| | T2 | Add an Alarm |
| | T3 | Search, Save Image on Browser |
| Controlled Pointing | T4 | Number Search and Point |
Study Tasks and Measures. We adopted a repeated-measures design, with participants counterbalanced for condition order (see Table 2 for the conditions). Participants were asked to perform four different tasks split into two task types: everyday phone use, and controlled pointing (see Table 3). Users were asked to perform all tasks using two-handed interaction, holding the phone at the distance they normally would during daily use. We chose two-handed interaction to eliminate the phone-balancing effects typical of one-handed interaction, given the size of today's smartphones.

T1-T3 are everyday tasks users perform on a smartphone. They cover smartphone touch interactions of taps, swipes, and long presses, on UI widgets such as keyboards, buttons and scrolling content. Users were asked to experience performing these tasks under six conditions, including under impairments (both using the physical hardware and the VR device). At the end of all three tasks (T1-T3), users completed the NASA-TLX [18] survey to indicate their perceived workload during task performance. T4, on the other hand, is a controlled pointing task. Participants were given a stimulus number and then asked to click on the button with the corresponding number, as quickly and as precisely as they could (see Figure 12 for a screenshot of the application used in this task). Users repeated this task 80 times in succession, for each of the six conditions (A-F; see Table 2). We recorded the touch times and positions with the task app. We conducted a short semi-structured interview at the end of the study to understand users' experiences with, and perceptions of, the physical and virtual impairment simulations.

Instruments: We compared Empath-D with a commercial physical impairment simulator [13]. To calibrate for visual acuity, we adapted a test similar to a Snellen eye test chart [42], showing rows of letters with each lower row having a smaller font size. We first used the physical impairment simulator to obtain the minimum acceptable font size. Using the same test page in the VR, we applied the impairment and gradually adjusted the severity until we hit the minimum acceptable font size.
Figure 12: Screenshot of the test application for the pointing task (a grid of numbered buttons)

To calibrate the inner circle of clarity for glaucoma, we implemented an app that allows us to adjust the diameter of a coloured circle. We then used the physical impairment simulator for glaucoma, and adjusted the coloured circle to the point at which the circle reaches the fringe of clarity. We then calibrated the virtual glaucoma simulation in a similar manner. Three independent measurements for visual acuity and circle of clarity were taken by the research team and averaged to determine the final calibration parameters of font size = 12sp and diameter = 60mm.

# 7.3 Empath-D vs. Physical Smartphone

We first investigate whether the VR-based interaction is a sufficiently faithful replica of the real-world interaction that a user would have with a regular smartphone, in the absence of any impairments.

Touch Accuracy: In all six conditions, users were able to achieve high levels of button touch accuracy (see Table 4), with an accuracy of $98.8\%$ ($SD = 1.67$) when users interacted unimpaired with the VR device. Comparing the accuracies between the physical smartphone and the VR device, we noted that the VR conditions had an overall accuracy of $99.12\%$ ($SD = 1.32$), whereas the use of the physical smartphone provided $100\%$ accuracy. In terms of location accuracy, we noted a difference of $2.28 \, \text{mm}$ ($SD = 2.98$) between the use of Empath-D vs. a physical smartphone. This difference is well within the uncertainty associated with finger touch interactions, and thus demonstrates that user performance was equivalent across both Empath-D and a physical smartphone.

Perceived Workload: NASA-TLX scores indicated that users did perceive significant differences in their workload using Empath-D, compared to the physical smartphone ($Z = 2.824$, $p = 0.005 < 0.05$). This suggests that navigating an app within the VR device requires greater cognitive effort than simply interacting with a regular smartphone. However, it is difficult to decipher whether this difference is due to Empath-D-specific issues, or a general lack of familiarity with VR devices.

Table 4: Accuracy of Button Touch Across All Users
| Impairment | Environment | Accuracy (SD) % |
| --- | --- | --- |
| None | Physical | 100 |
| | Virtual | 98.79 (1.67) |
| Cataracts | Physical | 100 |
| | Virtual | 99.09 (1.36) |
| Glaucoma | Physical | 100 |
| | Virtual | 99.49 (0.82) |
We additionally investigated the subjective feedback captured by the semi-structured interview. $83\%$ (10) of the users reported perceiving increased latency while using Empath-D, while 2 users indicated that they felt no noticeable latency difference. However, all 12 users indicated that the performance of Empath-D was "acceptable", and that they would be able to use the Empath-D system for testing the usability of apps, as long as the apps do not require extremely low-latency interactions. (3 users indicated that the system might not be usable for testing real-time games.)

# 7.4 Empath-D vs. Hardware Impairment Simulators

We now study the performance of Empath-D vis-à-vis impairments generated using commercially available hardware. Figure 11 shows the overhead of Empath-D under impairment conditions, demonstrating that Empath-D is able to operate without significant performance loss even in the presence of impairments.

Touch Accuracy: Table 4 enumerates the accuracy for the pointing task (T4) for two distinct impairments (Cataracts & Glaucoma), for both the VR-based Empath-D system and the hardware impairment simulator. We see that, in the Cataracts condition, Empath-D had a mean accuracy of $99.09\%$, which is virtually indistinguishable from that of the hardware device ($100\%$). A similar pattern was observed for the Glaucoma impairment ($99.49\%$ for Empath-D vs. $100\%$ for hardware). In terms of location accuracy, we noted a difference of $1.7 \, \text{mm}$ ($SD = 1.9$) for Cataracts and $1.2 \, \text{mm}$ ($SD = 1.6$) for Glaucoma between the use of Empath-D vs. the impairment hardware. Once again, this difference is well within the uncertainty associated with finger touch interactions. These results provide strong evidence that Empath-D is able to emulate impairment conditions in a manner equivalent to dedicated, commercial hardware.

Perceived Workload: The numerical TLX scores indicated that there was no significant difference for Cataracts; however, the difference for Glaucoma was significant ($Z = 3.061$, $p = 0.002 < 0.05$), with users indicating a higher perceived workload for the VR device.

# 7.5 Motion Sickness

At the end of the user study, we asked each participant if they felt discomfort or unwell. Only two of the twelve participants reported slight motion sickness while using Empath-D. Motion sickness may arise from (1) the use of the VR display itself, and (2) the latency of Empath-D; however, it is difficult to separate the two.

The effects of motion sickness are notably minor in our current prototype of Empath-D. The nature of our experimentation intensifies the use of the VR display, whereas practical use of Empath-D is likely to be more interspersed between app redesigns. We further discuss, in Section 9.2, how latency may be improved to reduce any motion sickness that results from the latency of Empath-D.

# 8 RELATED WORK

Designing for Inclusiveness. Newell et al. [31] pointed out that traditional user-centred design techniques provide little guidance for designing interfaces for elderly and disabled users, due to the large variation in the type and degree of impairments. They also highlighted that the standard guidelines for designing disabled-friendly UIs are too general [30] and lack empathy for users.
For instance, WCAG 2.0 requires that colour "is not used as the only visual means of conveying information, indicating an action, prompting a response or distinguishing a visual element". The designer must still interpret such a guideline into specific design decisions in their application. Over the years, various accessibility design guidelines (such as WCAG 2.0 [3], the IBM Accessibility Checklist [38], and the US Section 508 Standards [2]) and tools (aChecker [15]) have been proposed and refined. However, the problems pointed out by Newell remain largely unsolved, which hinders elaborate design for a target user group with a specific impairment.

Simulated Design. There exists prior work on helping UI designers design better interfaces for people suffering from vision impairments. Higuchi et al. [19] proposed a tool to simulate the visual capabilities of the elderly for the design of control panels, while Mankoff et al. [26] developed a tool to simulate a user with visual and motor impairments on the desktop screen. SIMVIZ [9, 47] uses the Oculus Rift VR device to simulate visual impairments when examining the reading of text on a smartphone. For audio modalities, Werfel et al. [47] simulated hearing ailments by using a pair of microphones with equalised headphones.

Differently from prior work, Empath-D uses VR as the medium for immersive evaluation to 1) flexibly support wider groups of impaired users, and 2) allow naturalistic interactions with a mobile phone in a virtual environment. This novel approach supports ecological validity in testing applications, and is key for mobile apps, which go beyond the static settings of previous work.

While previous work has focused on simulation of a single modality (visual or auditory), Empath-D is able to flexibly combine modalities to support any application type, ailment (visual, auditory, motor) and usage environment.

System Support for Accessibility. Modern mobile OSes provide accessibility support; in particular, they allow users with far-sightedness to increase font sizes and blind users to interact through vocal interfaces. Also, Zhong et al. enhanced Android accessibility for users with hand tremor by reducing the need for fine pointing and steady tapping [48]. We believe Empath-D will significantly expand the basic accessibility support of commodity devices and accelerate the design and deployment of various accessibility add-ons for different impaired users.

Testing of Mobile Applications. Recently there have been many systems, such as VanarSena [37], AMC [22], PUMA [17], Dynodroid [25], DECAF [24], and AppsPlayground [36], for automatically testing and identifying various types of UI and systems bugs in mobile applications. Empath-D takes a different approach, in that we do not detect bugs after the application is developed and deployed. Instead, we allow the designer to test early iterations of the designs rapidly. In this way, we hope to reduce the pain of having to make significant UI changes at the end of the design cycle, or worse, ending up with an application that cannot be used effectively by the target impaired demographic.

# 9 DISCUSSION

Our current studies indicate the considerable promise of Empath-D as a mechanism for rapid and empathetic evaluation of app usability. We now discuss some additional studies and issues that we intend to explore further.

# 9.1 User Study with Designers

We conducted a short user study with two mobile app developers to qualitatively examine Empath-D in actual use.
Both developers had previously worked to create an Android mobile application, which was used as the baseline for the study. The developers were tasked with redesigning the mobile app for the glaucoma-impaired under two conditions: 1) without Empath-D, but with materials describing glaucoma and showing functionally accurate examples of glaucoma, and 2) with the same materials, plus Empath-D. Both developers agreed that Empath-D helped them improve their designs over the baseline condition. They reported that Empath-D allowed them to improve their designs in two ways: 1) they could focus their attention on re-designing particular problematic parts of the UI, and 2) they were able to appropriately calibrate their modifications (for instance, increasing the font size may help, but text that is too large will also cause glaucoma sufferers to visually scan more, causing fatigue).

# 9.2 Dealing with Latency Issues

Our experimental studies indicate that users are able to utilise Empath-D effectively for "conventional" apps, i.e., those that typically involve sporadic interaction by users with UI elements such as buttons and keyboards. The current end-to-end latency (of $\approx$ 240 msec) is not an impediment to high-fidelity evaluation of such apps. However, the participants also indicated that this latency (the lag between user actions and rendering in the VR display) would pose a problem for highly latency-sensitive applications, such as games. At present, it is thus appropriate to state that Empath-D needs additional optimisations to support such applications. The most obvious improvement would be to replace the default Android emulator with a faster, custom emulation engine; this is likely to reclaim $\approx$ 100 msec of the delay budget.

The current implementation streams JPEG images (the hand, and the emulator's screen) from the intermediary computer to the VR smartphone. We plan to adopt a low-latency video streaming codec such as H.265 HEVC [43], which would help reduce networking and rendering latency. OS-level optimisations (e.g., preemptive priority for inter-component messages) may be needed to support even lower latency.
The vision of Empath-D can be extended to create other richer interaction modes, often blending virtual and augmented reality (AR) settings. As an example, certain emulation conditions may need to generate and integrate synthetic sensor traces, to replace the real sensor traces from the smartphone-e.g., to mimic the user's movement in locations, such as forests and mountains, the phone's real GPS trace would need to be replaced by a synthetic GPS trace as in [27, 28]. Similarly, in some cases, the app itself might need to take inputs from the VR world-e.g., if the app was being used to magnify certain objects embedded in the VR world. While such use cases can be supported, they will require enhancements to the current Empath-D framework, and it is likely that the implementation may surface additional challenges, in terms of computational overhead and latency. + +# 9.5 Developing Impairment Filters and Profiles + +To demonstrate the viability of Empath-D, we focused on demonstrating the ability to simulate visual impairments and in particular cataracts and glaucoma. As we explored, these impairments have functional aspects that are commonly employed to characterise them, such as visual acuity or contrast sensitivity, and are often accompanied by standard tests such as the Snellen eye test chart [42] and Pelli-Robson contrast sensitivity chart [34] respectively. From examining the commercial physical impairment simulator and our experimentation, we believe that Empath-D has the ability to functionally simulate other impairments. + +We recognise two important directions that Empath-D needs address to improve impairment simulation and use. First, impairment filters have to be developed in concert with medical professionals who are subject matter experts in the areas of the specific pathologies. This helps to develop a library of impairment filters. Second, + +with verified impairment filters, we may create impairment profiles, which characterise groups of users with possibly overlapping requirements. For instance, a hypothetical impairment profile may calibrate for a demographic of a range of ages, sex, and percentage of the population who may have myopia and cataracts—both which affect visual acuity. With impairment profiles, app developers may easily select and understand the demographic to which they are designing for. + +# 10 CONCLUSION + +We presented the design and evaluation of Empath-D, a framework that allows app developers to 'step into the shoes' of impaired users, and perform an empathetic evaluation of their app interfaces. Our key idea is to utilise a virtual world (using a commodity VR device) to present an impaired view of the app's interface, while allowing the user to interact naturally with a real commodity smartphone in the physical world. Overcoming the current computational limitations (of the VR device and the Android emulator) required us to make careful system choices, such as (i) appropriate tradeoffs between the resolution and frame rate for rendering the virtual smartphone, (ii) subsampling of the mesh representing the user's hand and (iii) scaling up the size of the virtual smartphone to overcome the lower resolution of the VR device. User studies show that Empath-D is effective in (a) providing usability that is equivalent to using a real app (on a real smartphone), for applications that do not require ultra-low latency and (b) emulating impairments in a similar fashion to custom hardware devices. 
We believe that Empath-D can be a powerful new paradigm for effective bidirectional integration between real-world user actions and virtual worlds, and that this can enable additional immersive applications beyond just 'impairment emulation'. + +# 11 ACKNOWLEDGEMENT + +We are thankful to our shepherd Prof. Xia Zhou and all anonymous reviewers for their valuable reviews. This research is supported partially by Singapore Ministry of Education Academic Research Fund Tier 2 under research grant MOE2014-T2-1063, and by the National Research Foundation, Prime Minister's Office, Singapore under its IDM Futures Funding Initiative. All findings and recommendations are those of the authors and do not necessarily reflect the views of the granting agency, or SMU. + +# REFERENCES + +[1] [n. d.]. AGNES (Age Gain Now Empathy Systems. ([n. d.]). Retrieved 2018-04-13 from http://agelab.mit.edu/agnes-age-gain-now-empathy-system +[2] [n. d]. US Section 508 Standards. ([n. d]). Retrieved 2018-04-13 from https: //www.section508.gov/ +[3] 2008. Web Content Accessibility Guidelines (WCAG) 2.0. (11 December 2008). Retrieved 2018-04-13 from https://www.w3.org/TR/WCAG20/ +[4] 2016. Samsung Galaxy S7 Specifications. (2016). Retrieved 2018-04-13 from http://www.samsung.com/global/galaxy/galaxy-s7/#!/spec +[5] 2017. Samsung Gear VR Specifications. (2017). Retrieved 2018-04-13 from http://www.samsung.com/global/galaxy/gear-vr/specs/ +[6] 2018. SolvePnP, Camera Calibration and 3D Reconstruction, OpenCV. (2018). Retrieved 2018-04-13 from https://docs.opencv.org/2.4/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html +[7] Omid Abari, Dinesh Bharadia, Austin Duffield, and Dina Katabi. 2017. Enabling High-Quality Untethered Virtual Reality. In 14th USENIX Symposium on Networked Systems Design and Implementation (NSDI 17). USENIX Association, Boston, MA, 531-544. https://www.usenix.org/conference/nsdi17/technical-sessions/presentation/abari + +[8] Ardalan Amiri Sani, Kevin Boos, Min Hong Yun, and Lin Zhong. 2014. Rio: A System Solution for Sharing I/O Between Mobile Systems. In Proceedings of the 12th Annual International Conference on Mobile Systems, Applications, and Services (MobiSys '14). ACM, New York, NY, USA, 259-272. https://doi.org/10.1145/2594368.2594370 +[9] Halim Cagri Ates, Alexander Fiannaca, and Eelke Folmer. 2015. Immersive Simulation of Visual Impairments Using a Wearable See-through Display. In Proceedings of the Ninth International Conference on Tangible, Embedded, and Embodied Interaction (TEI '15). ACM, New York, NY, USA, 225-228. https://doi.org/10.1145/2677199.2680551 +[10] Kevin Boos, David Chu, and Eduardo Cuervo. 2016. FlashBack: Immersive Virtual Reality on Mobile Devices via Rendering Memozoation. In Proceedings of the 14th Annual International Conference on Mobile Systems, Applications, and Services (MobiSys '16). ACM, New York, NY, USA, 291-304. https://doi.org/10.1145/2906388.2906418 +[11] Kenny Tsu Wei Choo, Rajesh Krishna Balan, Tan Kiat Wee, Jagmohan Chauhan, Archan Misra, and Youngki Lee. 2017. Empath-D: Empathetic Design for Accessibility. In Proceedings of the 18th International Workshop on Mobile Computing Systems and Applications (HotMobile '17). ACM, New York, NY, USA, 55-60. https://doi.org/10.1145/3032970.3032981 +[12] Eduardo Cuervo, Alec Wolman, Landon P. Cox, Kiron Lebeck, Ali Razeen, Stefan Saroiu, and Madanlal Musuvathi. 2015. Kahawai: High-Quality Mobile Gaming Using GPU Offload. 
In Proceedings of the 13th Annual International Conference on Mobile Systems, Applications, and Services (MobiSys '15). ACM, New York, NY, USA, 121-135. https://doi.org/10.1145/2742647.2742657
[13] Marshall Flax. 2018. Low Vision Simulators. (2018). Retrieved 2018-04-13 from https://www.lowvisionsimulators.com/
[14] S. Garrido-Jurado, R. Muñoz-Salinas, F.J. Madrid-Cuevas, and M.J. Marín-Jiménez. 2014. Automatic generation and detection of highly reliable fiducial markers under occlusion. Pattern Recognition 47, 6 (2014), 2280-2292. https://doi.org/10.1016/j.patcog.2014.01.005
[15] Greg Gay and Cindy Qi Li. 2010. AChecker: Open, Interactive, Customizable, Web Accessibility Checking. In Proceedings of the 2010 International Cross Disciplinary Conference on Web Accessibility (W4A) (W4A '10). ACM, New York, NY, USA, Article 23, 2 pages. https://doi.org/10.1145/1805986.1806019
[16] Genymotion. [n. d.]. Genymotion Android Emulator. Retrieved 2018-04-13 from https://www.genymotion.com/
[17] Shuai Hao, Bin Liu, Suman Nath, William G.J. Halfond, and Ramesh Govindan. 2014. PUMA: Programmable UI-automation for Large-scale Dynamic Analysis of Mobile Apps. In Proceedings of the 12th Annual International Conference on Mobile Systems, Applications, and Services (MobiSys '14). ACM, New York, NY, USA, 204-217. https://doi.org/10.1145/2594368.2594390
[18] Sandra G Hart and Lowell E Staveland. 1988. Development of NASA-TLX (Task Load Index): Results of empirical and theoretical research. In Advances in Psychology. Vol. 52. Elsevier, 139-183.
[19] Kazunori Higuchi, Yasuo Sakaguchi, Kazuhiko Sugiyama, and Tomoaki Nakano. 1999. Simulating the human vision of elderly for designing control panels. In Systems, Man, and Cybernetics, 1999. IEEE SMC'99 Conference Proceedings. 1999 IEEE International Conference on, Vol. 5. IEEE, 703-708.
[20] Intel. 2016. Intel® RealSense™ Camera SR300 Product Specifications. (2016). Retrieved 2018-04-13 from https://ark.intel.com/products/92329/Intel-RealSense-Camera-SR300
[21] Zeqi Lai, Y. Charlie Hu, Yong Cui, Linhui Sun, and Ningwei Dai. 2017. Furion: Engineering High-Quality Immersive Virtual Reality on Today's Mobile Devices. In Proceedings of the 23rd Annual International Conference on Mobile Computing and Networking (MobiCom '17). ACM, New York, NY, USA, 409-421. https://doi.org/10.1145/3117811.3117815
[22] Kyungmin Lee, Jason Flinn, T.J. Giuli, Brian Noble, and Christopher Peplin. 2013. AMC: Verifying User Interface Properties for Vehicular Applications. In Proceedings of the 11th Annual International Conference on Mobile Systems, Applications, and Services (MobiSys '13). ACM, New York, NY, USA, 1-12. https://doi.org/10.1145/2462456.2464459
[23] Gordon E Legge, Sing-Hang Cheung, Deyue Yu, Susana TL Chung, Hye-Won Lee, and Daniel P Owens. 2007. The case for the visual span as a sensory bottleneck in reading. Journal of Vision 7, 2 (2007), 9-9.
[24] Bin Liu, Suman Nath, Ramesh Govindan, and Jie Liu. 2014. DECAF: Detecting and Characterizing Ad Fraud in Mobile Apps. In 11th USENIX Symposium on Networked Systems Design and Implementation (NSDI 14). USENIX Association, Seattle, WA, 57-70. https://www.usenix.org/conference/nsdi14/technical-sessions/presentation/liu_bin
[25] Aravind Machiry, Rohan Tahiliani, and Mayur Naik. 2013. Dynodroid: An Input Generation System for Android Apps. In Proceedings of the 2013 9th Joint Meeting on Foundations of Software Engineering (ESEC/FSE 2013). ACM, New York, NY, USA, 224-234.
https://doi.org/10.1145/2491411.2491450
[26] Jennifer Mankoff, Holly Fait, and Ray Juang. 2005. Evaluating accessibility by simulating the experiences of users with vision or motor impairments. IBM Systems Journal 44, 3 (2005), 505-517.
[27] Chulhong Min, Seungchul Lee, Changhun Lee, Youngki Lee, Seungwoo Kang, Seungpyo Choi, Wonjung Kim, and Junehwa Song. 2016. PADA: Power-aware Development Assistant for Mobile Sensing Applications. In Proceedings of the 2016 ACM International Joint Conference on Pervasive and Ubiquitous Computing (UbiComp '16). ACM, New York, NY, USA, 946-957. https://doi.org/10.1145/2971648.2971676
[28] Chulhong Min, Youngki Lee, Chungkuk Yoo, Seungwoo Kang, Sangwon Choi, Pillsoon Park, Inseok Hwang, Younghyun Ju, Seungpyo Choi, and Junehwa Song. 2015. PowerForecaster: Predicting Smartphone Power Impact of Continuous Sensing Applications at Pre-installation Time. In Proceedings of the 13th ACM Conference on Embedded Networked Sensor Systems (SenSys '15). ACM, New York, NY, USA, 31-44. https://doi.org/10.1145/2809695.2809728
[29] Produkt + Projekt Wolfgang Moll. [n. d.]. Age simulation suit GERT - the GERontic Test suit. Retrieved 2018-04-13 from http://www.age-simulation-suit.com/
[30] Alan Newell and Peter Gregor. 1988. Human computer interaction for people with disabilities. (1988).
[31] Alan F Newell, Peter Gregor, Maggie Morgan, Graham Pullin, and Catriona Macaulay. 2011. User-sensitive inclusive design. Universal Access in the Information Society 10, 3 (2011), 235-243.
[32] Nvidia. 2016. GeForce GTX 1080 Specifications. (2016). Retrieved 2018-04-13 from https://www.geforce.com/hardware/Desktop-gpus/geforce-gtx-1080/specifications
[33] National Institute on Aging. 2016. World's older population grows dramatically. (28 March 2016). Retrieved 2018-04-13 from https://www.nih.gov/news-events/news-releases/worlds-older-population-grows-dramatically
[34] DG Pelli, JG Robson, et al. 1988. The design of a new letter chart for measuring contrast sensitivity. In Clinical Vision Sciences. CiteSeer.
[35] Android Open Source Project. 2017. SurfaceFlinger and HardwareComposer. (March 2017). Retrieved 2018-04-13 from https://source.android.com/devices/graphics/arch-sf-hwc
[36] Vaibhav Rastogi, Yan Chen, and William Enck. 2013. AppsPlayground: Automatic Security Analysis of Smartphone Applications. In Proceedings of the Third ACM Conference on Data and Application Security and Privacy (CODASPY '13). ACM, New York, NY, USA, 209-220. https://doi.org/10.1145/2435349.2435379
[37] Lenin Ravindranath, Suman Nath, Jitendra Padhye, and Hari Balakrishnan. 2014. Automatic and Scalable Fault Detection for Mobile Applications. In Proceedings of the 12th Annual International Conference on Mobile Systems, Applications, and Services (MobiSys '14). ACM, New York, NY, USA, 190-203. https://doi.org/10.1145/2594368.2594377
[38] IBM Accessibility Research. 2017. IBM Accessibility Checklist 7.0. (18 July 2017). Retrieved 2018-04-13 from http://www-03.ibm.com/able/guidelines/ci162/accessibility_checklist.html
[39] Justin B. Rousek, Sonja Koneczny, and M. Susan Hallbeck. 2009. Simulating Visual Impairment to Detect Hospital Wayfinding Difficulties. Proceedings of the Human Factors and Ergonomics Society Annual Meeting 53, 8 (Oct. 2009), 531-535.
[40] Samsung. 2014. Samsung Galaxy S5 Specifications. (2014). Retrieved 2018-04-13 from http://www.samsung.com/uk/smartphones/galaxy-s5-g900f/SM-G900FZKABTU/
[41] Alvy Ray Smith and James F. Blinn. 1996. Blue Screen Matting.
In Proceedings of the 23rd Annual Conference on Computer Graphics and Interactive Techniques (SIGGRAPH '96). ACM, New York, NY, USA, 259-268. https://doi.org/10.1145/237170.237263 +[42] Herman Snellen. 1873. Probebuchstaben zur Bestimmung der Sehschärfe. Vol. 1. H. Peters. +[43] G. J. Sullivan, J. R. Ohm, W. J. Han, and T. Wiegand. 2012. Overview of the High Efficiency Video Coding (HEVC) Standard. IEEE Transactions on Circuits and Systems for Video Technology 22, 12 (Dec 2012), 1649-1668. https://doi.org/10.1109/TCSVT.2012.2221191 +[44] Unity Technologies. [n. d.]. Unity. ([n. d.]). Retrieved 2018-04-13 from https://unity3d.com/ +[45] Sam Tregillus and Eelke Folmer. 2016. VR-STEP: Walking-in-Place Using Inertial Sensing for Hands Free Navigation in Mobile VR Environments. In Proceedings of the 2016 CHI Conference on Human Factors in Computing Systems (CHI '16). ACM, New York, NY, USA, 1250-1255. https://doi.org/10.1145/2858036.2858084 +[46] Zhou Wang, Alan C Bovik, Hamid R Sheikh, and Eero P Simoncelli. 2004. Image quality assessment: from error visibility to structural similarity. IEEE Transactions on Image Processing 13, 4 (2004), 600-612. +[47] Fabian Werfel, Roman Wiche, Jochen Feitsch, and Christian Geiger. 2016. Empathizing Audiovisual Sense Impairments: Interactive Real-Time Illustration of Diminished Sense Perception. In Proc. of AH. +[48] Yu Zhong, Astrid Weber, Casey Burkhardt, Phil Weaver, and Jeffrey P. Bigham. 2015. Enhancing Android Accessibility for Users with Hand Tremor by Reducing Fine Pointing and Steady Tapping. In Proceedings of the 12th Web for All Conference (W4A '15). ACM, New York, NY, USA, Article 29, 10 pages. https://doi.org/10.1145/2745555.2747277 +[49] George J. Zimmerman. 1979. Zimmerman Low Vision Simulation Kit. (1979).
Retrieved 2018-04-13 from http://www.lowvisionsimulationkit.com/ \ No newline at end of file diff --git a/data/2025/2503_12xxx/2503.12933/images/061f389c91dff21d715784e13d2bb92b0ce1df9f862751e39e580c598af0aa23.jpg b/data/2025/2503_12xxx/2503.12933/images/061f389c91dff21d715784e13d2bb92b0ce1df9f862751e39e580c598af0aa23.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d4e1413fd46d2ff17d39b6748f916d83cef3dca6 --- /dev/null +++ b/data/2025/2503_12xxx/2503.12933/images/061f389c91dff21d715784e13d2bb92b0ce1df9f862751e39e580c598af0aa23.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fcaba3c6dcbb8de1a57a4a4c117494d27f928f82fa0bce72820e8fa8c4f0feb1 +size 28316 diff --git a/data/2025/2503_12xxx/2503.12933/images/11e5b5ede7aadf764283bae50216204001ff4629a008d7bccfe665359f2a217c.jpg b/data/2025/2503_12xxx/2503.12933/images/11e5b5ede7aadf764283bae50216204001ff4629a008d7bccfe665359f2a217c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a0decf436b67c65fd24aceaacef3434e6f37ee74 --- /dev/null +++ b/data/2025/2503_12xxx/2503.12933/images/11e5b5ede7aadf764283bae50216204001ff4629a008d7bccfe665359f2a217c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c0c5c4462bca831f10dcdf223646541b9da9e663ce5f3404a34fc9a633c4bc6c +size 7049 diff --git a/data/2025/2503_12xxx/2503.12933/images/1dcb64b45b7edead9f0980680ddcb16e04ceb7b4ff22f89c3ac8e10075db7d63.jpg b/data/2025/2503_12xxx/2503.12933/images/1dcb64b45b7edead9f0980680ddcb16e04ceb7b4ff22f89c3ac8e10075db7d63.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6254e058f3419f4df4455cae71b8f1839abf02c3 --- /dev/null +++ b/data/2025/2503_12xxx/2503.12933/images/1dcb64b45b7edead9f0980680ddcb16e04ceb7b4ff22f89c3ac8e10075db7d63.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a21e5594a2cb23ed5285f89455716ea650da62248828707ce65816eb01a77a10 +size 14737 diff --git a/data/2025/2503_12xxx/2503.12933/images/2d0c925cd5b5c961e0517affb584f4170fe5ce6aae91f40748c0cd5792f14f77.jpg b/data/2025/2503_12xxx/2503.12933/images/2d0c925cd5b5c961e0517affb584f4170fe5ce6aae91f40748c0cd5792f14f77.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dfb6210199baac4a93818cec36f01197cc630453 --- /dev/null +++ b/data/2025/2503_12xxx/2503.12933/images/2d0c925cd5b5c961e0517affb584f4170fe5ce6aae91f40748c0cd5792f14f77.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e48190ae57b97579fe5eb64d15acccdfcf8829d4f00105585a21ba0accd16568 +size 26031 diff --git a/data/2025/2503_12xxx/2503.12933/images/2f232bd4a97c3fdf7f35dadee020a4b313e917af6e86f761c541d5c89d75b5f5.jpg b/data/2025/2503_12xxx/2503.12933/images/2f232bd4a97c3fdf7f35dadee020a4b313e917af6e86f761c541d5c89d75b5f5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4f6a456ca26d7a003f51f5f5d2540124aa16cbcf --- /dev/null +++ b/data/2025/2503_12xxx/2503.12933/images/2f232bd4a97c3fdf7f35dadee020a4b313e917af6e86f761c541d5c89d75b5f5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9fc3b2fb0fe686b984d84ef9279b5108ac7cb05c419aa3110698414894fbf183 +size 26616 diff --git a/data/2025/2503_12xxx/2503.12933/images/6410f07cb949eca3b22ee97fdcbbad013a03f757c41dd81b3492a8c8d7258e5a.jpg b/data/2025/2503_12xxx/2503.12933/images/6410f07cb949eca3b22ee97fdcbbad013a03f757c41dd81b3492a8c8d7258e5a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..602a684d3f3d84652e0e5580876b58ca6b21fd0a --- /dev/null +++ 
b/data/2025/2503_12xxx/2503.12933/images/6410f07cb949eca3b22ee97fdcbbad013a03f757c41dd81b3492a8c8d7258e5a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3586659b387b4418f8b1fae12f5d53b50bd092d455fb4b954c37ef2e64e341ec +size 32124 diff --git a/data/2025/2503_12xxx/2503.12933/images/7dc207ba8205ccdb334a933ebb68c6cfbb0d9d987fb5854960854ec9b06f67ca.jpg b/data/2025/2503_12xxx/2503.12933/images/7dc207ba8205ccdb334a933ebb68c6cfbb0d9d987fb5854960854ec9b06f67ca.jpg new file mode 100644 index 0000000000000000000000000000000000000000..eb353e6d0384d9908a451ecf3425eac5cafc0e1a --- /dev/null +++ b/data/2025/2503_12xxx/2503.12933/images/7dc207ba8205ccdb334a933ebb68c6cfbb0d9d987fb5854960854ec9b06f67ca.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6c4463ab469d5580d7e6da1bf020dc028263a0259da27ad9450f42f814d3fd9a +size 27556 diff --git a/data/2025/2503_12xxx/2503.12933/images/8638bc94a87df3220c56eaf20424b1d9b0ac79c22a95c4d941e476120acb7b7a.jpg b/data/2025/2503_12xxx/2503.12933/images/8638bc94a87df3220c56eaf20424b1d9b0ac79c22a95c4d941e476120acb7b7a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8b169f5db1d6727a3a7c8f1d5f940d4528d12cb8 --- /dev/null +++ b/data/2025/2503_12xxx/2503.12933/images/8638bc94a87df3220c56eaf20424b1d9b0ac79c22a95c4d941e476120acb7b7a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4b125913f12dd0e3e8ff35624572b9319ad189fbecfe0cb8b8265ef4fb3b85ab +size 15362 diff --git a/data/2025/2503_12xxx/2503.12933/images/afc2aa0c18463b9d417e5b5637078d0b8fe1fa5610f226957679066ce8a41372.jpg b/data/2025/2503_12xxx/2503.12933/images/afc2aa0c18463b9d417e5b5637078d0b8fe1fa5610f226957679066ce8a41372.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fc7bd1ffbc08f07c0d21da915cc05acc49e19adc --- /dev/null +++ b/data/2025/2503_12xxx/2503.12933/images/afc2aa0c18463b9d417e5b5637078d0b8fe1fa5610f226957679066ce8a41372.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ebf4a8a028726b0ad3d9f2a6d16ed3d2a967e03d6626b3b9f65c29a876deb202 +size 38599 diff --git a/data/2025/2503_12xxx/2503.12933/images/b377111a3890b5b21d82f0ce87f6713e12d99dbe61914f2f616d4f3b1f803405.jpg b/data/2025/2503_12xxx/2503.12933/images/b377111a3890b5b21d82f0ce87f6713e12d99dbe61914f2f616d4f3b1f803405.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0c6f36eba69d5d099082e0396bdcb690d2973c2d --- /dev/null +++ b/data/2025/2503_12xxx/2503.12933/images/b377111a3890b5b21d82f0ce87f6713e12d99dbe61914f2f616d4f3b1f803405.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a244e3df8a6b7ef14d64981964acb1d16e7d4385e900f3508d875d6a9a65f69f +size 7193 diff --git a/data/2025/2503_12xxx/2503.12933/images/b3e9e489695a4a9946ad9fb6d24d204879d2ca00e03d4c16e01c599e24768049.jpg b/data/2025/2503_12xxx/2503.12933/images/b3e9e489695a4a9946ad9fb6d24d204879d2ca00e03d4c16e01c599e24768049.jpg new file mode 100644 index 0000000000000000000000000000000000000000..74b10c76a1d59cb2992846152c7e7f34c38e47d7 --- /dev/null +++ b/data/2025/2503_12xxx/2503.12933/images/b3e9e489695a4a9946ad9fb6d24d204879d2ca00e03d4c16e01c599e24768049.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e204a6dd38b6d7243f7a58be68e8ed26e2248ed3ee58817a7f8a61d2f8f9ce62 +size 6197 diff --git a/data/2025/2503_12xxx/2503.12933/images/bd83a371d793f92d12e8cfe908d31d13c777cba3ad2142f6936b839ce6a0da4b.jpg 
b/data/2025/2503_12xxx/2503.12933/images/bd83a371d793f92d12e8cfe908d31d13c777cba3ad2142f6936b839ce6a0da4b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..314e20aa6da8b2c11c2671ea30daa70be0cc3091 --- /dev/null +++ b/data/2025/2503_12xxx/2503.12933/images/bd83a371d793f92d12e8cfe908d31d13c777cba3ad2142f6936b839ce6a0da4b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f5a732d5f9f432ae265dab358ac9a6e1ce0ae5c79d82b8b2221f1144829414eb +size 82256 diff --git a/data/2025/2503_12xxx/2503.12933/images/ed586f433128be2511fc2fdefb49a6de472fe84bdb9ff54f955321bebd4e5d7b.jpg b/data/2025/2503_12xxx/2503.12933/images/ed586f433128be2511fc2fdefb49a6de472fe84bdb9ff54f955321bebd4e5d7b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8a9383839114b4e6e7f8b5f2874a31aeb37a7ef5 --- /dev/null +++ b/data/2025/2503_12xxx/2503.12933/images/ed586f433128be2511fc2fdefb49a6de472fe84bdb9ff54f955321bebd4e5d7b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c69a1457ef315cbcc1435120f6f6f51161092aaa5263215302f63406f3f5473e +size 20372 diff --git a/data/2025/2503_12xxx/2503.12933/images/f48189648e06342f7107918350098ce5353869f4381a97f5a5c27617bfa2b7bc.jpg b/data/2025/2503_12xxx/2503.12933/images/f48189648e06342f7107918350098ce5353869f4381a97f5a5c27617bfa2b7bc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..edd0bedb565b4cd6892ac2eaf20886f8dd33b4f0 --- /dev/null +++ b/data/2025/2503_12xxx/2503.12933/images/f48189648e06342f7107918350098ce5353869f4381a97f5a5c27617bfa2b7bc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:005015d4ea0f195c6f77d8fdde439feb602e4e235770eb588f1056a58b6cf565 +size 9782 diff --git a/data/2025/2503_12xxx/2503.12933/images/f4fadf609d2c2cfe536a1d5dee2467428b592246da75a7f0e69d720eb5161084.jpg b/data/2025/2503_12xxx/2503.12933/images/f4fadf609d2c2cfe536a1d5dee2467428b592246da75a7f0e69d720eb5161084.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2fe8f64012f60c43b0bded5c1d6ff8570fecad4b --- /dev/null +++ b/data/2025/2503_12xxx/2503.12933/images/f4fadf609d2c2cfe536a1d5dee2467428b592246da75a7f0e69d720eb5161084.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:afff8327c795d8f885c6cb3ff65720a8d7c4f18e8961f21a2c2975da39ab3094 +size 14617 diff --git a/data/2025/2503_12xxx/2503.12933/images/f63e1c9d8dc06359caf5a4b12930ff0b0d5eaf774c06aa36bff6f66e42c719c4.jpg b/data/2025/2503_12xxx/2503.12933/images/f63e1c9d8dc06359caf5a4b12930ff0b0d5eaf774c06aa36bff6f66e42c719c4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..478f5adb8aec73d62f9163d6a77878d606961eeb --- /dev/null +++ b/data/2025/2503_12xxx/2503.12933/images/f63e1c9d8dc06359caf5a4b12930ff0b0d5eaf774c06aa36bff6f66e42c719c4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e8828df63ef42a35458766e87be9444438ea54f1a9743f57967518af8883b80d +size 34638 diff --git a/data/2025/2503_12xxx/2503.12933/layout.json b/data/2025/2503_12xxx/2503.12933/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..f5d5839c5d9e033b4584bbc6ad8666cd64d8c8e5 --- /dev/null +++ b/data/2025/2503_12xxx/2503.12933/layout.json @@ -0,0 +1,12228 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 63, + 80, + 547, + 101 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 80, + 547, + 101 + ], + "spans": [ + { + "bbox": [ + 63, + 80, + 547, + 101 + ], + "type": "text", + "content": "Empath-D: VR-based 
Empathetic App Design for Accessibility" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 89, + 110, + 192, + 148 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 110, + 192, + 148 + ], + "spans": [ + { + "bbox": [ + 89, + 110, + 192, + 148 + ], + "type": "text", + "content": "Wonjung Kim\\* wjkim@nclab.kaist.ac.kr KAIST" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 233, + 110, + 378, + 149 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 233, + 110, + 378, + 149 + ], + "spans": [ + { + "bbox": [ + 233, + 110, + 378, + 149 + ], + "type": "text", + "content": "Kenny Tsu Wei Choo kenny.choo.2012@smu.edu.sg Singapore Management University" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 397, + 110, + 542, + 149 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 397, + 110, + 542, + 149 + ], + "spans": [ + { + "bbox": [ + 397, + 110, + 542, + 149 + ], + "type": "text", + "content": "Youngki Lee \nyoungkilee@smu.edu.sg \nSingapore Management University" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 187, + 158, + 258, + 170 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 187, + 158, + 258, + 170 + ], + "spans": [ + { + "bbox": [ + 187, + 158, + 258, + 170 + ], + "type": "text", + "content": "Archan Misra" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 177, + 171, + 269, + 183 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 177, + 171, + 269, + 183 + ], + "spans": [ + { + "bbox": [ + 177, + 171, + 269, + 183 + ], + "type": "text", + "content": "archanm@smu.edu.sg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 151, + 184, + 294, + 195 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 151, + 184, + 294, + 195 + ], + "spans": [ + { + "bbox": [ + 151, + 184, + 294, + 195 + ], + "type": "text", + "content": "Singapore Management University" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 333, + 158, + 441, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 333, + 158, + 441, + 171 + ], + "spans": [ + { + "bbox": [ + 333, + 158, + 441, + 171 + ], + "type": "text", + "content": "Rajesh Krishna Balan" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 347, + 172, + 428, + 184 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 347, + 172, + 428, + 184 + ], + "spans": [ + { + "bbox": [ + 347, + 172, + 428, + 184 + ], + "type": "text", + "content": "rajesh@smu.edu.sg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 315, + 184, + 459, + 195 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 184, + 459, + 195 + ], + "spans": [ + { + "bbox": [ + 315, + 184, + 459, + 195 + ], + "type": "text", + "content": "Singapore Management University" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 51, + 202, + 113, + 213 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 202, + 113, + 213 + ], + "spans": [ + { + "bbox": [ + 51, + 202, + 113, + 213 + ], + "type": "text", + "content": "ABSTRACT" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 50, + 217, + 296, + 415 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 217, + 296, + 415 + ], + "spans": [ + { + "bbox": [ + 50, + 217, + 296, + 415 + ], + "type": "text", + "content": "With app-based interaction increasingly permeating all aspects of daily living, it is essential to ensure that apps are designed to be inclusive and are usable by a wider audience such as the elderly, with various 
impairments (e.g., visual, audio and motor). We propose Empath-D, a system that fosters empathetic design, by allowing app designers, in-situ, to rapidly evaluate the usability of their apps, from the perspective of impaired users. To provide a truly authentic experience, Empath-D carefully orchestrates the interaction between a smartphone and a VR device, allowing the user to experience simulated impairments in a virtual world while interacting naturally with the app, using a real smartphone. By carefully orchestrating the VR-smartphone interaction, Empath-D tackles challenges such as preserving low-latency app interaction, accurate visualization of hand movement and low-overhead perturbation of I/O streams. Experimental results show that user interaction with Empath-D is comparable (both in accuracy and user perception) to real-world app usage, and that it can simulate impairment effects as effectively as a custom hardware simulator." } ] } ], "index": 11 }, { "bbox": [ 51, 424, 135, 435 ], "type": "title", "angle": 0, "lines": [ { "bbox": [ 51, 424, 135, 435 ], "spans": [ { "bbox": [ 51, 424, 135, 435 ], "type": "text", "content": "CCS CONCEPTS" } ] } ], "index": 12 }, { "bbox": [ 50, 438, 296, 495 ], "type": "text", "angle": 0, "lines": [ { "bbox": [ 50, 438, 296, 495 ], "spans": [ { "bbox": [ 50, 438, 296, 495 ], "type": "text", "content": "- Human-centered computing " }, { "bbox": [ 50, 438, 296, 495 ], "type": "inline_equation", "content": "\\rightarrow" }, { "bbox": [ 50, 438, 296, 495 ], "type": "text", "content": " Systems and tools for interaction design; Ubiquitous and mobile computing systems and tools; Accessibility design and evaluation methods; Accessibility systems and tools; Ubiquitous and mobile computing design and evaluation methods;" } ] } ], "index": 13 }, { "bbox": [ 51, 503, 117, 514 ], "type": "title", "angle": 0, "lines": [ { "bbox": [ 51, 503, 117, 514 ], "spans": [ { "bbox": [ 51, 503, 117, 514 ], "type": "text", "content": "KEYWORDS" } ] } ], "index": 14 }, { "bbox": [ 50, 518, 296, 540 ], "type": "text", "angle": 0, "lines": [ { "bbox": [ 50, 518, 296, 540 ], "spans": [ { "bbox": [ 50, 518, 296, 540 ], "type": "text", "content": "empathetic design; accessibility; mobile design; virtual reality; multi-device, distributed user interfaces" } ] } ], "index": 15 }, { "bbox": [ 51, 544, 141, 553 ], "type": "title", "angle": 0, "lines": [ { "bbox": [ 51, 544, 141, 553 ], "spans": [ { "bbox": [ 51, 544, 141, 553 ], "type": "text", "content": "ACM Reference Format:" } ] } ], "index": 16 }, { "bbox": [ 50, 554, 295, 585 ], "type": "text", "angle": 0, "lines": [ { "bbox": [ 50, 554, 295, 585 ], "spans": [ { "bbox": [ 50, 554, 295, 585 ], "type": "text", "content": "Wonjung Kim, Kenny Tsu Wei Choo, Youngki Lee, Archan Misra, and Rajesh Krishna Balan. 2018. Empath-D: VR-based Empathetic App Design for Accessibility. 
In MobiSys '18: The 16th Annual International Conference on Mobile" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 51, + 592, + 295, + 610 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 592, + 295, + 610 + ], + "spans": [ + { + "bbox": [ + 51, + 592, + 295, + 610 + ], + "type": "text", + "content": "*This work was done while the author was on an internship at Singapore Management University" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 50, + 617, + 295, + 675 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 617, + 295, + 675 + ], + "spans": [ + { + "bbox": [ + 50, + 617, + 295, + 675 + ], + "type": "text", + "content": "Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for components of this work owned by others than ACM must be honored. Abstracting with credit is permitted. To copy otherwise, or republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee. Request permissions from permissions@acm.org." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 51, + 675, + 190, + 684 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 675, + 190, + 684 + ], + "spans": [ + { + "bbox": [ + 51, + 675, + 190, + 684 + ], + "type": "text", + "content": "MobiSys '18, June 10-15, 2018, Munich, Germany" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 51, + 685, + 186, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 685, + 186, + 693 + ], + "spans": [ + { + "bbox": [ + 51, + 685, + 186, + 693 + ], + "type": "inline_equation", + "content": "\\odot" + }, + { + "bbox": [ + 51, + 685, + 186, + 693 + ], + "type": "text", + "content": " 2018 Association for Computing Machinery." 
+ } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 51, + 693, + 180, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 693, + 180, + 700 + ], + "spans": [ + { + "bbox": [ + 51, + 693, + 180, + 700 + ], + "type": "text", + "content": "ACM ISBN 978-1-4503-5720-3/18/06...$15.00" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 51, + 700, + 167, + 709 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 700, + 167, + 709 + ], + "spans": [ + { + "bbox": [ + 51, + 700, + 167, + 709 + ], + "type": "text", + "content": "https://doi.org/10.1145/3210240.3210331" + } + ] + } + ], + "index": 23 + }, + { + "type": "image", + "bbox": [ + 319, + 209, + 553, + 350 + ], + "blocks": [ + { + "bbox": [ + 319, + 209, + 553, + 350 + ], + "lines": [ + { + "bbox": [ + 319, + 209, + 553, + 350 + ], + "spans": [ + { + "bbox": [ + 319, + 209, + 553, + 350 + ], + "type": "image", + "image_path": "afc2aa0c18463b9d417e5b5637078d0b8fe1fa5610f226957679066ce8a41372.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 370, + 364, + 504, + 375 + ], + "lines": [ + { + "bbox": [ + 370, + 364, + 504, + 375 + ], + "spans": [ + { + "bbox": [ + 370, + 364, + 504, + 375 + ], + "type": "text", + "content": "Figure 1: Overview of Empath-D" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_caption" + } + ], + "index": 24 + }, + { + "bbox": [ + 314, + 397, + 560, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 397, + 560, + 418 + ], + "spans": [ + { + "bbox": [ + 314, + 397, + 560, + 418 + ], + "type": "text", + "content": "Systems, Applications, and Services, June 10-15, 2018, Munich, Germany. ACM, New York, NY, USA, 13 pages. https://doi.org/10.1145/3210240.3210331" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 315, + 431, + 421, + 441 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 431, + 421, + 441 + ], + "spans": [ + { + "bbox": [ + 315, + 431, + 421, + 441 + ], + "type": "text", + "content": "1 INTRODUCTION" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 313, + 445, + 559, + 566 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 445, + 559, + 566 + ], + "spans": [ + { + "bbox": [ + 313, + 445, + 559, + 566 + ], + "type": "text", + "content": "Digital interactions have become increasingly commonplace and immersive. We now constantly interact with our personal devices and computing-enhanced ambient objects (such as coffeemakers, home automation systems and digital directories), while engaging in everyday activities, such as commuting, shopping or exercising. Given the ubiquity of such interactions, it is important to ensure that the associated computing interfaces remain accessible to segments of the population, such as the elderly, who suffer from various impairments. The global elderly population is projected to reach " + }, + { + "bbox": [ + 313, + 445, + 559, + 566 + ], + "type": "inline_equation", + "content": "16.7\\%" + }, + { + "bbox": [ + 313, + 445, + 559, + 566 + ], + "type": "text", + "content": " by 2050 [33], and such users suffer disproportionately from impairments (e.g., vision) that hinder accessibility." 
+ } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 313, + 567, + 560, + 709 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 567, + 560, + 709 + ], + "spans": [ + { + "bbox": [ + 313, + 567, + 560, + 709 + ], + "type": "text", + "content": "To support more accessible design, our earlier work [11] introduced the vision of Empath-D, which uses a virtual reality (VR) device to provide mobile application/object designers with a realistic emulation of the interaction experience that impaired users would encounter. In this work, we present the design, implementation and validation of the Empath-D system inspired by this vision. Empath-D's goal is to allow unimpaired application designers to step into the shoes of impaired users and rapidly evaluate the usability of alternative prototypes. While we shall principally focus on empathetic evaluation of mobile applications (apps), Empath-D's design is generic enough to permit emulation of other real-world interactions-e.g., how an elderly user with cataracts and hearing loss would experience a traffic-light controlled pedestrian intersection." + } + ] + } + ], + "index": 29 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 205, + 35, + 559 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 205, + 35, + 559 + ], + "spans": [ + { + "bbox": [ + 14, + 205, + 35, + 559 + ], + "type": "text", + "content": "arXiv:2503.12933v1 [cs.HC] 17 Mar 2025" + } + ] + } + ], + "index": 30 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 50, + 84, + 295, + 248 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 84, + 295, + 248 + ], + "spans": [ + { + "bbox": [ + 50, + 84, + 295, + 248 + ], + "type": "text", + "content": "Empath-D's " + }, + { + "bbox": [ + 50, + 84, + 295, + 248 + ], + "type": "inline_equation", + "content": "^1" + }, + { + "bbox": [ + 50, + 84, + 295, + 248 + ], + "type": "text", + "content": " key idea is to present the user with an impairment-augmented view of the smartphone interface (or other digital objects) in a virtual world, while allowing the non-impaired user to perform natural interactions, using a physical smartphone, with a real-world instance of the smartphone app. At a high-level, Empath-D works as follows (see Figure 1): The (unimpaired) user uses a physical smartphone to perform real-world interactions (such as scrolls, taps or gestures) with the app, while wearing a VR device. The results of such interactions are projected instantaneously through the I/O interfaces (e.g., screen, speaker) of a 'virtual smartphone' visible in the VR display, but only after those I/O streams have been appropriately degraded by the specified impairment. For example, in Figure 1, the virtual phone's display (and the world view) has been appropriately vignetted, to mimic the experience of a user suffering from glaucoma." 
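To make the interaction loop just described concrete, the following stand-in shows one way the handheld phone's interaction events could be forwarded to the machine hosting the app instance. This is a minimal illustrative sketch, not Empath-D's actual implementation: it assumes a hypothetical newline-delimited JSON event stream from the phone and replays taps into an Android emulator with the stock adb input tool, whereas the real system (Section 3) redirects the raw input/output streams.

```python
# Illustrative stand-in for Empath-D's input redirection (not the paper's code).
# Assumes the handheld phone streams one JSON object per line, e.g.
# {"type": "tap", "x": 540, "y": 960}; the real system forwards raw
# touch/IMU event streams, which this sketch does not reproduce.
import json
import socket
import subprocess

EMULATOR = "emulator-5554"  # hypothetical adb serial of the app emulator


def inject_tap(x: int, y: int) -> None:
    """Replay a tap on the emulated phone via adb's stock input tool."""
    subprocess.run(
        ["adb", "-s", EMULATOR, "shell", "input", "tap", str(x), str(y)],
        check=True,
    )


def serve(port: int = 5600) -> None:
    """Accept one phone connection and replay its tap events."""
    with socket.create_server(("0.0.0.0", port)) as srv:
        conn, _ = srv.accept()
        with conn, conn.makefile("r") as stream:
            for line in stream:
                event = json.loads(line)
                if event.get("type") == "tap":
                    inject_tap(event["x"], event["y"])


if __name__ == "__main__":
    serve()
```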
+ } ] } ], "index": 2 }, { "bbox": [ 50, 249, 295, 270 ], "type": "text", "angle": 0, "lines": [ { "bbox": [ 50, 249, 295, 270 ], "spans": [ { "bbox": [ 50, 249, 295, 270 ], "type": "text", "content": "Key Challenges: To mimic impairments with adequate fidelity and usability, Empath-D must support the following features:" } ] } ], "index": 3 }, { "bbox": [ 67, 272, 295, 509 ], "type": "list", "angle": 0, "index": 7, "blocks": [ { "bbox": [ 67, 272, 295, 360 ], "type": "text", "angle": 0, "lines": [ { "bbox": [ 67, 272, 295, 360 ], "spans": [ { "bbox": [ 67, 272, 295, 360 ], "type": "text", "content": "- Fast, Accurate Multi-device Operation: Empath-D utilizes a split-interaction paradigm: a user interacts with an app using a real-world handheld smartphone, while perceiving (viewing, hearing) the app responses through the VR interface. To faithfully replicate the real-world experience, this split-mode interaction must have tight time coupling and visual fidelity (of the virtual phone's screen), comparable to direct interactions with a standalone smartphone." } ] } ], "index": 4 }, { "bbox": [ 67, 362, 295, 450 ], "type": "text", "angle": 0, "lines": [ { "bbox": [ 67, 362, 295, 450 ], "spans": [ { "bbox": [ 67, 362, 295, 450 ], "type": "text", "content": "- Real-time Tracking: To preserve a user's perception of naturalistic interactions, Empath-D must not only capture explicit phone events, but also mirror physical actions taken by the user (e.g., swinging the phone around or having one's hand hover over the phone). Thus, Empath-D must also track and render, in real-time, the orientation/location of both the phone and the user's hand within the VR device's field-of-view." } ] } ], "index": 5 }, { "bbox": [ 67, 454, 295, 509 ], "type": "text", "angle": 0, "lines": [ { "bbox": [ 67, 454, 295, 509 ], "spans": [ { "bbox": [ 67, 454, 295, 509 ], "type": "text", "content": "- Lightweight Impairment Execution: To preserve the feel of natural interaction, Empath-D must insert the impairment-specific perturbations into the input/output streams with imperceptible latency or computational overhead (e.g., no reduction in video frame rate)." 
+ } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 51, + 510, + 295, + 520 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 510, + 295, + 520 + ], + "spans": [ + { + "bbox": [ + 51, + 510, + 295, + 520 + ], + "type": "text", + "content": "Key Contributions: We make the following major contributions:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 521, + 295, + 689 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 67, + 521, + 295, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 521, + 295, + 641 + ], + "spans": [ + { + "bbox": [ + 67, + 521, + 295, + 641 + ], + "type": "text", + "content": "- 3-Tier Virtualisation Model: We design a novel 3-tier architecture where (i) the real-world smartphone serves merely as a tracker, forwarding user interaction events (e.g., screen touch and gestures) to a computationally powerful intermediary, after which (ii) the intermediary device perturbs those events by blending in specific input impairments (e.g., hand tremors) and passes them to an app instance running on a smartphone emulator, and finally (iii) the VR device receives the redirected outputs from this app instance and renders an appropriately-impaired (by blending in the output impairments) virtual world, including a virtual smartphone." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 644, + 295, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 644, + 295, + 689 + ], + "spans": [ + { + "bbox": [ + 67, + 644, + 295, + 689 + ], + "type": "text", + "content": "Real-time Hand and Phone Tracking: We use an RGB-Depth camera, mounted on the head-worn VR device, to track the outline of a user's hand, and subsequently perform a lightweight but realistic 3-D rendering of the hand on the VR" + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 339, + 84, + 559, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 339, + 84, + 559, + 140 + ], + "spans": [ + { + "bbox": [ + 339, + 84, + 559, + 140 + ], + "type": "text", + "content": "display. We also use fiducial marker tracking [14] by the camera to track the position/orientation of the real-world smartphone. We demonstrate our ability to achieve both high-fidelity (pointing error " + }, + { + "bbox": [ + 339, + 84, + 559, + 140 + ], + "type": "inline_equation", + "content": "\\leq 5\\,mm" + }, + { + "bbox": [ + 339, + 84, + 559, + 140 + ], + "type": "text", + "content": ") and low-latency (end-to-end delays below 120 msec) hand tracking and display." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 331, + 143, + 559, + 389 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 331, + 143, + 559, + 254 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 143, + 559, + 254 + ], + "spans": [ + { + "bbox": [ + 331, + 143, + 559, + 254 + ], + "type": "text", + "content": "- Usability of Virtualized Phone, in Use Environments: We show that Empath-D is not just usable, but that user performance (absent impairments) using Empath-D's virtual smartphone is equivalent to real-world interaction with a smartphone. In addition, we allow usability testing of apps in their use environments, a key enabler for design of mobile applications which may be used anywhere. 
Our Samsung Gear VR-based prototype has end-to-end latency low-enough (only 96.3 msec of latency, excluding the mobile app emulation) to permit faithful reproduction of direct smartphone usage." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 331, + 257, + 559, + 389 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 257, + 559, + 389 + ], + "spans": [ + { + "bbox": [ + 331, + 257, + 559, + 389 + ], + "type": "text", + "content": "- Validation of Impairment Fidelity and Overall System: We implement two distinct vision (glaucoma & cataract) and one audio (high-frequency hearing loss) impairment in our Empath-D prototype. We then conduct a set of studies using the vision impairments, where 12 participants perform a series of standardised activities (e.g., add an alarm), using both our Empath-D prototype (test) and a commercial hardware vision impairment simulator (control) and establish that the performance of users is equivalent across the test and control groups. Finally, we conduct a small-scale study to provide preliminary evidence that our empathetic approach allows developers to design accessible mobile UIs faster and better." + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 315, + 401, + 459, + 412 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 401, + 459, + 412 + ], + "spans": [ + { + "bbox": [ + 315, + 401, + 459, + 412 + ], + "type": "text", + "content": "2 THE EMPATH-D VISION" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 314, + 415, + 512, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 415, + 512, + 426 + ], + "spans": [ + { + "bbox": [ + 314, + 415, + 512, + 426 + ], + "type": "text", + "content": "We use an example to illustrate the use of Empath-D:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 314, + 427, + 559, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 427, + 559, + 536 + ], + "spans": [ + { + "bbox": [ + 314, + 427, + 559, + 536 + ], + "type": "text", + "content": "Designing for Visual Impairment. Alice is designing a mobile app that automatically magnifies text from real environments seen through its rear camera to aid people who suffer from cataracts (a condition that dims and blurs vision). Alice starts Empath-D and is presented with a web interface that allows her to customise impairments (e.g., specify the intensity of visual blur). After customising the environment, Alice clicks in the Empath-D web interface to (1) compile the environment to her phone used for VR display (VR-phone)2 and (2) connect an input/output service to a separate phone (IO-phone). She then plugs the VR-phone into the VR headset." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 314, + 536, + 559, + 668 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 536, + 559, + 668 + ], + "spans": [ + { + "bbox": [ + 314, + 536, + 559, + 668 + ], + "type": "text", + "content": "Alice then compiles her Android app, and runs it in the Android emulator. She puts on the VR headset and holds the IO-phone in her hands. A virtual smartphone (Virt-phone) shows up in VR, tracking the real-world motion of the IO-phone. Alice now navigates through the virtual world, experiencing it as an \"impaired user, with cataracts\". 
She holds up the IO-phone on a street corner (in the real world), and notices that the magnified text (as seen in the virtual phone in the virtual world) is not clear enough to be legible to a cataract-impaired user. She can now iteratively and rapidly modify her app, recompile it, and execute it in the Android emulator, until she is satisfied with the output. This scenario demonstrates the ease-of-use of Empath-D, with no need for special instrumentation of the app." } ] } ], "index": 19 } ], "discarded_blocks": [ { "bbox": [ 51, 57, 237, 69 ], "type": "header", "angle": 0, "lines": [ { "bbox": [ 51, 57, 237, 69 ], "spans": [ { "bbox": [ 51, 57, 237, 69 ], "type": "text", "content": "MobiSys '18, June 10-15, 2018, Munich, Germany" } ] } ], "index": 0 }, { "bbox": [ 484, 57, 559, 68 ], "type": "header", "angle": 0, "lines": [ { "bbox": [ 484, 57, 559, 68 ], "spans": [ { "bbox": [ 484, 57, 559, 68 ], "type": "text", "content": "Wonjung Kim et al." } ] } ], "index": 1 }, { "bbox": [ 52, 699, 212, 709 ], "type": "page_footnote", "angle": 0, "lines": [ { "bbox": [ 52, 699, 212, 709 ], "spans": [ { "bbox": [ 52, 699, 212, 709 ], "type": "text", "content": "Video of Empath-D in action at https://is.gd/empath_d" } ] } ], "index": 20 }, { "bbox": [ 314, 689, 559, 707 ], "type": "page_footnote", "angle": 0, "lines": [ { "bbox": [ 314, 689, 559, 707 ], "spans": [ { "bbox": [ 314, 689, 559, 707 ], "type": "text", "content": "2The VR-phone is needed only for VR devices that require a smartphone-e.g., Samsung Gear VR" } ] } ], "index": 21 } ], "page_size": [ 612, 792 ], "page_idx": 1 }, { "para_blocks": [ { "bbox": [ 51, 83, 175, 95 ], "type": "title", "angle": 0, "lines": [ { "bbox": [ 51, 83, 175, 95 ], "spans": [ { "bbox": [ 51, 83, 175, 95 ], "type": "text", "content": "3 SYSTEM OVERVIEW" } ] } ], "index": 2 }, { "bbox": [ 51, 99, 230, 112 ], "type": "title", "angle": 0, "lines": [ { "bbox": [ 51, 99, 230, 112 ], "spans": [ { "bbox": [ 51, 99, 230, 112 ], "type": "text", "content": "3.1 Design Goals and Implications" } ] } ], "index": 3 }, { "bbox": [ 50, 114, 294, 136 ], "type": "text", "angle": 0, "lines": [ { "bbox": [ 50, 114, 294, 136 ], "spans": [ { "bbox": [ 50, 114, 294, 136 ], "type": "text", "content": "Empath-D has the following key goals, which directly influence the salient implementation choices." } ] } ], "index": 4 }, { "bbox": [ 67, 146, 295, 704 ], "type": "list", "angle": 0, "index": 9, "blocks": [ { "bbox": [ 67, 146, 295, 299 ], "type": "text", "angle": 0, "lines": [ { "bbox": [ 67, 146, 295, 299 ], "spans": [ { "bbox": [ 67, 146, 295, 299 ], "type": "text", "content": "- Holistic emulation of impairments: For a truly empathetic experience, the app designer must perceive the effects of impairments not just while using the mobile app, but throughout her immersion in the virtual world. Consider a user, suffering from cataract, who is interacting with her smartphone while attending a dimly lit dinner gathering. 
Simply blurring the phone display, while leaving the background illumination and focus unchanged, might not replicate challenges in visual contrast that an impaired user would face in reality. This requirement precludes the straightforward use of I/O redirection techniques such as Rio [8], which can potentially perturb the I/O streams of only the mobile device. Instead, the impairment must be applied holistically, to the entire virtual world." } ] } ], "index": 5 }, { "bbox": [ 67, 300, 295, 486 ], "type": "text", "angle": 0, "lines": [ { "bbox": [ 67, 300, 295, 486 ], "spans": [ { "bbox": [ 67, 300, 295, 486 ], "type": "text", "content": "- Realistic emulation of smartphone and mobile apps in the virtual world: Empath-D aims at realistically emulating mobile apps within the virtual world rendered by a commodity VR headset. Realistic emulation of mobile apps imposes two requirements. (a) First, the virtual smartphone should have sufficient visual resolution, corresponding to typical usage where the smartphone is held " }, { "bbox": [ 67, 300, 295, 486 ], "type": "inline_equation", "content": "\\approx 30\\mathrm{cm}" }, { "bbox": [ 67, 300, 295, 486 ], "type": "text", "content": " away from the eye. We shall see (in Section 6.3) that this requirement, coupled with differences in display resolutions between smartphones and VR devices, requires careful magnification of the virtual smartphone to provide legibility without hampering usage fidelity. (b) Second, the user should not perceive any lag between her user input and the rendered view of the app, seen through the VR device. Quantitatively, we thus require that the task completion time, experienced by a user interacting with the emulated application in the virtual world, should be comparable to real-world app usage on a real smartphone." } ] } ], "index": 6 }, { "bbox": [ 67, 487, 295, 563 ], "type": "text", "angle": 0, "lines": [ { "bbox": [ 67, 487, 295, 563 ], "spans": [ { "bbox": [ 67, 487, 295, 563 ], "type": "text", "content": "- Use of unmodified app: For easy and low-overhead adoption by app designers, Empath-D should support the emulation of mobile applications using the original, unmodified binaries (e.g., .apk for Android). Empath-D's requirement to support empathetic emulation without app modifications implies that app designers would be able to adopt Empath-D with minimal impact to existing development practices." } ] } ], "index": 7 }, { "bbox": [ 67, 563, 295, 704 ], "type": "text", "angle": 0, "lines": [ { "bbox": [ 67, 563, 295, 704 ], "spans": [ { "bbox": [ 67, 563, 295, 704 ], "type": "text", "content": "- Low-latency, accurate finger tracking: This goal is an extension of the holistic emulation objective. In the real-world, users utilise instantaneous visual feedback and proprioception to move their fingers around the smartphone display, even when they are hovering but not actually touching the display. To ensure consistency between the user's tactile, visual and proprioceptive perceptions of her hand movement, Empath-D should also realistically render, in the virtual world, the user's hand movements and any changes in the position/orientation of the real-world smartphone, without any perceptible lag. 
In Section 6, we shall see how the Empath-D implementation meets these stringent performance bounds." } ] } ], "index": 8 } ], "sub_type": "text" }, { "type": "image", "bbox": [ 319, 82, 558, 304 ], "blocks": [ { "bbox": [ 319, 82, 558, 304 ], "lines": [ { "bbox": [ 319, 82, 558, 304 ], "spans": [ { "bbox": [ 319, 82, 558, 304 ], "type": "image", "image_path": "bd83a371d793f92d12e8cfe908d31d13c777cba3ad2142f6936b839ce6a0da4b.jpg" } ] } ], "index": 10, "angle": 0, "type": "image_body" }, { "bbox": [ 370, 316, 504, 327 ], "lines": [ { "bbox": [ 370, 316, 504, 327 ], "spans": [ { "bbox": [ 370, 316, 504, 327 ], "type": "text", "content": "Figure 2: Empath-D architecture" } ] } ], "index": 11, "angle": 0, "type": "image_caption" } ], "index": 10 }, { "bbox": [ 331, 344, 570, 409 ], "type": "text", "angle": 0, "lines": [ { "bbox": [ 331, 344, 570, 409 ], "spans": [ { "bbox": [ 331, 344, 570, 409 ], "type": "text", "content": "- Light-weight, effective emulation of impairments: Empath-D will need to emulate impairments, at different levels of severity. For high-fidelity empathetic emulation, the insertion of such impairments in the I/O streams of the smartphone should not generate any additional artefacts (e.g., increased latency, reduction in display refresh rate, etc.)." } ] } ], "index": 12 }, { "bbox": [ 315, 421, 428, 433 ], "type": "title", "angle": 0, "lines": [ { "bbox": [ 315, 421, 428, 433 ], "spans": [ { "bbox": [ 315, 421, 428, 433 ], "type": "text", "content": "3.2 System Overview" } ] } ], "index": 13 }, { "bbox": [ 314, 435, 559, 457 ], "type": "text", "angle": 0, "lines": [ { "bbox": [ 314, 435, 559, 457 ], "spans": [ { "bbox": [ 314, 435, 559, 457 ], "type": "text", "content": "We now present the overview of the Empath-D system (illustrated in Figure 2)." } ] } ], "index": 14 }, { "bbox": [ 313, 457, 559, 598 ], "type": "text", "angle": 0, "lines": [ { "bbox": [ 313, 457, 559, 598 ], "spans": [ { "bbox": [ 313, 457, 559, 598 ], "type": "text", "content": "Using Empath-D in VR. To immersively evaluate the application, the developer (or the tester) starts by installing her developed application binaries (i.e., Android .apks) to run on the emulated smartphone. The developer then adjusts the profile settings for the impairment using Empath-D's web dashboard and selects a use case scenario (e.g., in the office, in the street, etc.). She holds her physical smartphone, puts on the VR headset and earphones (when hearing impairments are involved), and experiences the immersive reality (where she can use the app - now mapped onto the physical smartphone - with the configured impairment under the designated use case scenario) that Empath-D generates. She then tests out various interfaces and functionalities of the app in the immersive VR environments."
+ } ] } ], "index": 15 }, { "bbox": [ 314, 600, 559, 632 ], "type": "text", "angle": 0, "lines": [ { "bbox": [ 314, 600, 559, 632 ], "spans": [ { "bbox": [ 314, 600, 559, 632 ], "type": "text", "content": "Components of Empath-D. Empath-D runs across three different physical devices: a physical smartphone, a computer, and a VR device (see Figure 2)." } ] } ], "index": 16 }, { "bbox": [ 313, 633, 559, 710 ], "type": "text", "angle": 0, "lines": [ { "bbox": [ 313, 633, 559, 710 ], "spans": [ { "bbox": [ 313, 633, 559, 710 ], "type": "text", "content": "Smartphone: In Empath-D, the user interacts with the app using a real smartphone held in her hand. Interestingly, this smartphone does not run the app itself, but functions as a tracking device, helping to preserve the user's realistic sense of smartphone interaction. The smartphone simply redirects the user interaction events (e.g., touch events such as clicks and swipes on the display and motion events captured by inertial sensors) to the computer, which is in" } ] } ], "index": 17 } ], "discarded_blocks": [ { "bbox": [ 51, 57, 282, 69 ], "type": "header", "angle": 0, "lines": [ { "bbox": [ 51, 57, 282, 69 ], "spans": [ { "bbox": [ 51, 57, 282, 69 ], "type": "text", "content": "Empath-D: VR-based Empathetic App Design for Accessibility" } ] } ], "index": 0 }, { "bbox": [ 374, 58, 559, 69 ], "type": "header", "angle": 0, "lines": [ { "bbox": [ 374, 58, 559, 69 ], "spans": [ { "bbox": [ 374, 58, 559, 69 ], "type": "text", "content": "MobiSys '18, June 10-15, 2018, Munich, Germany" } ] } ], "index": 1 } ], "page_size": [ 612, 792 ], "page_idx": 2 }, { "para_blocks": [ { "bbox": [ 51, 84, 294, 117 ], "type": "text", "angle": 0, "lines": [ { "bbox": [ 51, 84, 294, 117 ], "spans": [ { "bbox": [ 51, 84, 294, 117 ], "type": "text", "content": "charge of the app emulation. This smartphone also displays a fiducial marker array [14] on its display, to help in efficient, real-time tracking of the phone's location." } ] } ], "index": 2 }, { "bbox": [ 51, 118, 294, 194 ], "type": "text", "angle": 0, "lines": [ { "bbox": [ 51, 118, 294, 194 ], "spans": [ { "bbox": [ 51, 118, 294, 194 ], "type": "text", "content": "Computer: The computer is at the heart of Empath-D's ability to fuse the real and virtual world. It consists of two major components, the Phone and Hand Tracker and the Mobile Emulator, as well as a Web Dashboard (see Figure 6), which allows the user to select the impairment profile to be applied. In addition, as we shall discuss shortly, this computer may run an Impairment Generator cum Virtual World Renderer. 
Key functions include:" } ] } ], "index": 3 }, { "bbox": [ 67, 194, 294, 351 ], "type": "list", "angle": 0, "index": 6, "blocks": [ { "bbox": [ 67, 194, 294, 270 ], "type": "text", "angle": 0, "lines": [ { "bbox": [ 67, 194, 294, 270 ], "spans": [ { "bbox": [ 67, 194, 294, 270 ], "type": "text", "content": "- The Phone and Hand Tracker uses images captured by the VR headset-mounted camera to track the position and pose of the smartphone (relative to the VR device), and to create the virtual phone image at the correct position in the virtual world. It also uses the same camera to track the user's hand, as it interacts with the smartphone, and then renders it in the virtual world." } ] } ], "index": 4 }, { "bbox": [ 67, 275, 294, 351 ], "type": "text", "angle": 0, "lines": [ { "bbox": [ 67, 275, 294, 351 ], "spans": [ { "bbox": [ 67, 275, 294, 351 ], "type": "text", "content": "- The Mobile Emulator executes the app being tested, using the redirected stream of user interaction events transmitted by the smartphone. The resulting visual output of the app is then transmitted as a sequence of images to the VR device, where these images will be integrated into the virtual phone object; likewise, audio output (if any) is directly streamed to the VR device." } ] } ], "index": 5 } ], "sub_type": "text" }, { "bbox": [ 50, 352, 295, 548 ], "type": "text", "angle": 0, "lines": [ { "bbox": [ 50, 352, 295, 548 ], "spans": [ { "bbox": [ 50, 352, 295, 548 ], "type": "text", "content": "The overall Empath-D framework includes an Impairment Generator that is typically applied as one or more filters over the Virtual World Renderer (an engine such as Unity [44], which is responsible for combining various virtual objects and rendering the virtual world). The Impairment Generator effectively perturbs/modifies the audio/video feeds of the virtual world, before they are displayed on the VR device. For example, to emulate cataracts, it applies an appropriate 'blurring/dimming' filter on the video feed; similarly, to emulate high-frequency hearing loss (an audio impairment), this generator will apply a low-pass filter on the output audio stream (a simplified sketch of such filters follows below). These two components are placed inside a dotted-line rectangle in Figure 2, to reflect the reality that these components run on either the Computer or the VR device, depending on whether the VR device is tethered or not. On untethered VR devices (such as the Samsung Gear VR), the Impairment Generator and the Virtual World Renderer run on the VR device itself. In contrast, with tethered devices such as the HTC Vive, they run on the computer, which typically offers higher graphics quality, higher frame rates and faster execution." } ] } ], "index": 7 }, { "bbox": [ 50, 549, 295, 594 ], "type": "text", "angle": 0, "lines": [ { "bbox": [ 50, 549, 295, 594 ], "spans": [ { "bbox": [ 50, 549, 295, 594 ], "type": "text", "content": "VR Device: Finally, the VR device is used to display the synthesised virtual world to the user. This synthesis involves the fusion of the virtual smartphone, the user's hand and the ambient virtual world, all subject to the impairment filter." 
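As an illustration of the filter-based Impairment Generator just described, the sketch below degrades one rendered video frame in a cataract-like way (blur plus dimming) and low-passes an audio buffer to mimic high-frequency hearing loss. It is a minimal sketch assuming OpenCV, NumPy and SciPy; Empath-D applies its filters inside the VR rendering pipeline, and the severity parameters here (kernel size, gain, cutoff) are illustrative placeholders rather than the paper's calibrated impairment profiles.

```python
# Illustrative impairment filters (not Empath-D's calibrated shaders).
import cv2
import numpy as np
from scipy.signal import butter, lfilter


def cataract_filter(frame_bgr: np.ndarray, blur_px: int = 15,
                    gain: float = 0.7) -> np.ndarray:
    """Blur and dim a rendered frame to mimic cataract-degraded vision.
    blur_px and gain are placeholder severity parameters."""
    k = blur_px | 1  # Gaussian kernel size must be odd
    blurred = cv2.GaussianBlur(frame_bgr, (k, k), 0)
    return cv2.convertScaleAbs(blurred, alpha=gain, beta=0)  # dim by gain


def hearing_loss_filter(samples: np.ndarray, sample_rate: int,
                        cutoff_hz: float = 2000.0) -> np.ndarray:
    """Low-pass the output audio stream to mimic high-frequency loss."""
    b, a = butter(4, cutoff_hz / (sample_rate / 2), btype="low")
    return lfilter(b, a, samples)


if __name__ == "__main__":
    # Degrade a stand-in frame of the virtual-world feed.
    frame = np.full((480, 640, 3), 200, dtype=np.uint8)
    degraded = cataract_filter(frame)
    print(degraded.shape, degraded.dtype)
```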
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 51, + 604, + 254, + 628 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 604, + 254, + 628 + ], + "spans": [ + { + "bbox": [ + 51, + 604, + 254, + 628 + ], + "type": "text", + "content": "4 VR-BASED EMULATION OF MOBILE INTERACTION" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 50, + 632, + 295, + 708 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 632, + 295, + 708 + ], + "spans": [ + { + "bbox": [ + 50, + 632, + 295, + 708 + ], + "type": "text", + "content": "Empath-D follows a split-interaction paradigm: for realistic immersion, Empath-D renders the visual and audio output of the target app in the virtual world (i.e., via VR headset's display and speakers), while allowing the user to interact naturalistically with a real-world physical phone. A major challenge in this paradigm is to enable natural, low-latency tracking and display of the real-world motion of both the phone and the user's hands, so as to ensure consistency" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 84, + 559, + 194 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 84, + 559, + 194 + ], + "spans": [ + { + "bbox": [ + 313, + 84, + 559, + 194 + ], + "type": "text", + "content": "across the user's visual, tactile and proprioceptive experience. We achieve this by performing three distinct steps: (a) smartphone tracking, (b) hand tracking, and (c) hand rendering in VR, using an RGB-Depth (RGB-D) camera mounted on the VR headset. Empath-D first tracks the position and orientation of the physical smartphone and synchronises the position of the virtual phone to the physical smartphone (See Section 4.1). Separately, Empath-D also captures fingers in the real world and displays them at the correct position (relative to the virtual smartphone) in the virtual world (See Section 4.2 and 4.3)." + } + ] + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 358, + 209, + 516, + 369 + ], + "blocks": [ + { + "bbox": [ + 358, + 209, + 516, + 369 + ], + "lines": [ + { + "bbox": [ + 358, + 209, + 516, + 369 + ], + "spans": [ + { + "bbox": [ + 358, + 209, + 516, + 369 + ], + "type": "image", + "image_path": "2d0c925cd5b5c961e0517affb584f4170fe5ce6aae91f40748c0cd5792f14f77.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 321, + 380, + 553, + 392 + ], + "lines": [ + { + "bbox": [ + 321, + 380, + 553, + 392 + ], + "spans": [ + { + "bbox": [ + 321, + 380, + 553, + 392 + ], + "type": "text", + "content": "Figure 3: Tracking physical phone with fiducial markers" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 409, + 559, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 409, + 559, + 475 + ], + "spans": [ + { + "bbox": [ + 313, + 409, + 559, + 475 + ], + "type": "text", + "content": "Empath-D uses the headset-mounted RGB-D camera to capture the colour image along with the depth values, relative to the camera. The camera's position is always fixed, relative to the user's head. 
Its three axes are thus aligned to a user's head: " + }, + { + "bbox": [ + 313, + 409, + 559, + 475 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 313, + 409, + 559, + 475 + ], + "type": "text", + "content": "-axis to the user's forward (gaze) direction, and " + }, + { + "bbox": [ + 313, + 409, + 559, + 475 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 313, + 409, + 559, + 475 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 409, + 559, + 475 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 313, + 409, + 559, + 475 + ], + "type": "text", + "content": " axes capturing the vertical and horizontal displacement." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 314, + 486, + 513, + 499 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 486, + 513, + 499 + ], + "spans": [ + { + "bbox": [ + 314, + 486, + 513, + 499 + ], + "type": "text", + "content": "4.1 Tracking the physical smartphone" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 500, + 559, + 566 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 500, + 559, + 566 + ], + "spans": [ + { + "bbox": [ + 313, + 500, + 559, + 566 + ], + "type": "text", + "content": "Empath-D uses fiducial markers, displayed on the physical smartphone's screen, to localise the smartphone efficiently. It takes a colour image as an input, and returns the transformation relative to the camera's coordinate system: translation and rotation, i.e., x, y, z, roll, pitch, yaw from the RGB-D camera's coordinate system. We employ a technique proposed and detailed in [14]." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 566, + 559, + 708 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 566, + 559, + 708 + ], + "spans": [ + { + "bbox": [ + 313, + 566, + 559, + 708 + ], + "type": "text", + "content": "The Empath-D Hand Tracker component tracks the physical phone using markers captured by the camera. Each marker, displayed on the phone screen, has a distinct pattern. The tracker knows the position of each marker (e.g., top-left, top-right, bottom-left and bottom-right) in the physical smartphone screen's coordinate system. The system first detects these markers in a given colour image, identifying them based on their unique patterns (see Figure 3). In particular, the system recognises the coordinates of each of the four corners of each marker. Moreover, the system knows the true size of, and separation between, each marker. It then uses an object pose estimation algorithm (provided by openCV's solvePnP function [6]), along with the array of fiducial marker points, to compute the 3-D position and orientation of the smartphone. Past" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 58, + 236, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 58, + 236, + 69 + ], + "spans": [ + { + "bbox": [ + 52, + 58, + 236, + 69 + ], + "type": "text", + "content": "MobiSys '18, June 10-15, 2018, Munich, Germany" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 485, + 58, + 558, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 485, + 58, + 558, + 68 + ], + "spans": [ + { + "bbox": [ + 485, + 58, + 558, + 68 + ], + "type": "text", + "content": "Wonjung Kim et al." 
+ } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "code", + "bbox": [ + 57, + 99, + 294, + 308 + ], + "blocks": [ + { + "bbox": [ + 52, + 83, + 178, + 95 + ], + "lines": [ + { + "bbox": [ + 52, + 83, + 178, + 95 + ], + "spans": [ + { + "bbox": [ + 52, + 83, + 178, + 95 + ], + "type": "text", + "content": "Algorithm 1 Hand Segmentation" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 57, + 99, + 294, + 308 + ], + "lines": [ + { + "bbox": [ + 57, + 99, + 294, + 308 + ], + "spans": [ + { + "bbox": [ + 57, + 99, + 294, + 308 + ], + "type": "text", + "content": "1: Input: " + }, + { + "bbox": [ + 57, + 99, + 294, + 308 + ], + "type": "inline_equation", + "content": "T\\gets" + }, + { + "bbox": [ + 57, + 99, + 294, + 308 + ], + "type": "text", + "content": " Phone's translation (3-D vector) \n2: Input: " + }, + { + "bbox": [ + 57, + 99, + 294, + 308 + ], + "type": "inline_equation", + "content": "R\\gets" + }, + { + "bbox": [ + 57, + 99, + 294, + 308 + ], + "type": "text", + "content": " Phone's orientation " + }, + { + "bbox": [ + 57, + 99, + 294, + 308 + ], + "type": "inline_equation", + "content": "(3\\times 3" + }, + { + "bbox": [ + 57, + 99, + 294, + 308 + ], + "type": "text", + "content": " rotation matrix), \n3: Input: " + }, + { + "bbox": [ + 57, + 99, + 294, + 308 + ], + "type": "inline_equation", + "content": "F\\gets" + }, + { + "bbox": [ + 57, + 99, + 294, + 308 + ], + "type": "text", + "content": " RGBD Frame, 2-D array that each entry " + }, + { + "bbox": [ + 57, + 99, + 294, + 308 + ], + "type": "inline_equation", + "content": "F_{i,j}" + }, + { + "bbox": [ + 57, + 99, + 294, + 308 + ], + "type": "text", + "content": " holds a color value and 3-D position relative to the camera. 
\n4: Input: " + }, + { + "bbox": [ + 57, + 99, + 294, + 308 + ], + "type": "inline_equation", + "content": "V\\gets" + }, + { + "bbox": [ + 57, + 99, + 294, + 308 + ], + "type": "text", + "content": " 3-D region of interest (relative to the phone) \n5: Output: fgMask, 2D bool array whose dimension equals to " + }, + { + "bbox": [ + 57, + 99, + 294, + 308 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 57, + 99, + 294, + 308 + ], + "type": "text", + "content": " \n6: \n7: fgMask[i,j] " + }, + { + "bbox": [ + 57, + 99, + 294, + 308 + ], + "type": "inline_equation", + "content": "\\leftarrow" + }, + { + "bbox": [ + 57, + 99, + 294, + 308 + ], + "type": "text", + "content": " false for all " + }, + { + "bbox": [ + 57, + 99, + 294, + 308 + ], + "type": "inline_equation", + "content": "(i,j)" + }, + { + "bbox": [ + 57, + 99, + 294, + 308 + ], + "type": "text", + "content": " \n8: for point " + }, + { + "bbox": [ + 57, + 99, + 294, + 308 + ], + "type": "inline_equation", + "content": "(i,j)" + }, + { + "bbox": [ + 57, + 99, + 294, + 308 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 57, + 99, + 294, + 308 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 57, + 99, + 294, + 308 + ], + "type": "text", + "content": " do \n9: if " + }, + { + "bbox": [ + 57, + 99, + 294, + 308 + ], + "type": "inline_equation", + "content": "(i,j)" + }, + { + "bbox": [ + 57, + 99, + 294, + 308 + ], + "type": "text", + "content": " in screen_border then \n10: /\\* Case A: Blue background segmentation \\*/ \n11: fgMask[i,j] " + }, + { + "bbox": [ + 57, + 99, + 294, + 308 + ], + "type": "inline_equation", + "content": "\\leftarrow" + }, + { + "bbox": [ + 57, + 99, + 294, + 308 + ], + "type": "text", + "content": " 1-Blue " + }, + { + "bbox": [ + 57, + 99, + 294, + 308 + ], + "type": "inline_equation", + "content": "(F_{i,j}) + 0.5\\cdot Red(F_{i,j}) > \\tau" + }, + { + "bbox": [ + 57, + 99, + 294, + 308 + ], + "type": "text", + "content": " \n12: else \n13: /\\* Case B: Depth-based segmentation \\*/ \n14: posphone " + }, + { + "bbox": [ + 57, + 99, + 294, + 308 + ], + "type": "inline_equation", + "content": "\\leftarrow" + }, + { + "bbox": [ + 57, + 99, + 294, + 308 + ], + "type": "inline_equation", + "content": "R^{-1}\\cdot (Position(F_{i,j}) - T)" + }, + { + "bbox": [ + 57, + 99, + 294, + 308 + ], + "type": "text", + "content": " \n15: fgMask[i,j] " + }, + { + "bbox": [ + 57, + 99, + 294, + 308 + ], + "type": "inline_equation", + "content": "\\leftarrow" + }, + { + "bbox": [ + 57, + 99, + 294, + 308 + ], + "type": "text", + "content": " (posphone " + }, + { + "bbox": [ + 57, + 99, + 294, + 308 + ], + "type": "inline_equation", + "content": "\\in V" + }, + { + "bbox": [ + 57, + 99, + 294, + 308 + ], + "type": "text", + "content": ") \n16: end if \n17: end for \n18: return fgMask" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "code_body" + } + ], + "index": 3, + "sub_type": "algorithm" + }, + { + "bbox": [ + 50, + 340, + 295, + 362 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 340, + 295, + 362 + ], + "spans": [ + { + "bbox": [ + 50, + 340, + 295, + 362 + ], + "type": "text", + "content": "results [14] show that this technique can compute an object's position and orientation with sub-cm level accuracy." 
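Not part of the paper's artifact: a minimal Python sketch of the pose-estimation step just described, using OpenCV's solvePnP as the paper does. Detection of the marker corners themselves (the technique of [14]) is assumed to have already happened; the marker layout, pixel coordinates and camera intrinsics below are illustrative placeholders.

```python
import numpy as np
import cv2

# Known 3-D positions (metres) of one marker's corners in the phone
# screen's coordinate system (z = 0 on the screen plane). Illustrative:
# a 2 cm marker whose top-left corner sits at the screen origin. With
# the full marker array, every *visible* corner is stacked here, which
# is what keeps the pose solvable under partial occlusion.
object_points = np.array([
    [0.00, 0.00, 0.0],   # top-left
    [0.02, 0.00, 0.0],   # top-right
    [0.02, 0.02, 0.0],   # bottom-right
    [0.00, 0.02, 0.0],   # bottom-left
], dtype=np.float32)

# Pixel coordinates of the same corners as found by the marker detector.
image_points = np.array([
    [312.0, 208.0],
    [367.0, 206.0],
    [369.0, 261.0],
    [314.0, 263.0],
], dtype=np.float32)

# Assumed pinhole intrinsics of the RGB-D camera's colour sensor.
K = np.array([[615.0,   0.0, 320.0],
              [  0.0, 615.0, 240.0],
              [  0.0,   0.0,   1.0]], dtype=np.float32)
dist = np.zeros(5)  # assume an already-undistorted image

# solvePnP returns the rotation (Rodrigues vector) and translation that
# map screen coordinates into camera coordinates: the phone's 3-D pose.
ok, rvec, tvec = cv2.solvePnP(object_points, image_points, K, dist)
R, _ = cv2.Rodrigues(rvec)   # 3x3 rotation matrix, as used by Algorithm 1
print("phone translation (x, y, z):", tvec.ravel())
```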
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 362, + 295, + 462 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 362, + 295, + 462 + ], + "spans": [ + { + "bbox": [ + 50, + 362, + 295, + 462 + ], + "type": "text", + "content": "This fiducial marker-based algorithm would fail under two conditions: (a) when the markers are occluded by the user's hand, and (b) if the ambient illumination levels are too low or too high, reducing the contrast level of the markers. To tackle (a), the smartphone screen uses an entire array of markers displayed across the scene, thereby ensuring correct smartphone tracking as long as some part of the phone is visible. Contrast concerns are not particularly relevant in our scenario, as we assume that the user is testing the app in a regularly lit work/office environment." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 51, + 474, + 177, + 487 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 474, + 177, + 487 + ], + "spans": [ + { + "bbox": [ + 51, + 474, + 177, + 487 + ], + "type": "text", + "content": "4.2 Hand Segmentation" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 50, + 488, + 295, + 598 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 488, + 295, + 598 + ], + "spans": [ + { + "bbox": [ + 50, + 488, + 295, + 598 + ], + "type": "text", + "content": "Empath-D uses the frames captured by the RGB-D camera to track and segment the user's hand. For each frame, we extract the segment (polygon of pixels) that represents the user's hand, and render that segment in the virtual world. As the goal of hand-tracking is to provide the user with a natural view of her smartphone interactions, we restrict the tracking technique to a 3-D region of interest (ROI) that is centred at the phone, with a depth of " + }, + { + "bbox": [ + 50, + 488, + 295, + 598 + ], + "type": "inline_equation", + "content": "2cm" + }, + { + "bbox": [ + 50, + 488, + 295, + 598 + ], + "type": "text", + "content": " and a planar boundary of " + }, + { + "bbox": [ + 50, + 488, + 295, + 598 + ], + "type": "inline_equation", + "content": "6cm" + }, + { + "bbox": [ + 50, + 488, + 295, + 598 + ], + "type": "text", + "content": ". In other words, we only track the hand while it is " + }, + { + "bbox": [ + 50, + 488, + 295, + 598 + ], + "type": "inline_equation", + "content": "\\leq 2cms" + }, + { + "bbox": [ + 50, + 488, + 295, + 598 + ], + "type": "text", + "content": " away from the smartphone screen, and within " + }, + { + "bbox": [ + 50, + 488, + 295, + 598 + ], + "type": "inline_equation", + "content": "\\leq 6cms" + }, + { + "bbox": [ + 50, + 488, + 295, + 598 + ], + "type": "text", + "content": " of the smartphone edges." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 50, + 598, + 295, + 708 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 598, + 295, + 708 + ], + "spans": [ + { + "bbox": [ + 50, + 598, + 295, + 708 + ], + "type": "text", + "content": "A straightforward approach is to apply a depth-based segmentation strategy, where we first isolate only the foreground points which lie within a depth " + }, + { + "bbox": [ + 50, + 598, + 295, + 708 + ], + "type": "inline_equation", + "content": "= 2cm" + }, + { + "bbox": [ + 50, + 598, + 295, + 708 + ], + "type": "text", + "content": " of the smartphone surface. 
However, we empirically observed that, due to the glossy surface of the smartphone, such depth estimation was inaccurate for points located on the smartphone's screen. Accordingly, we implemented two separate segmentation methods (detailed in Algorithm 1): (case A) a colour-based segmentation approach to identify points which are directly over the smartphone, and (case B) a depth-based approach to identify points which are near, but not over, the smartphone's" + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 332, + 82, + 432, + 213 + ], + "blocks": [ + { + "bbox": [ + 332, + 82, + 432, + 213 + ], + "lines": [ + { + "bbox": [ + 332, + 82, + 432, + 213 + ], + "spans": [ + { + "bbox": [ + 332, + 82, + 432, + 213 + ], + "type": "image", + "image_path": "f48189648e06342f7107918350098ce5353869f4381a97f5a5c27617bfa2b7bc.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 334, + 224, + 429, + 235 + ], + "lines": [ + { + "bbox": [ + 334, + 224, + 429, + 235 + ], + "spans": [ + { + "bbox": [ + 334, + 224, + 429, + 235 + ], + "type": "text", + "content": "Figure 4: Mesh of hand" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 440, + 82, + 545, + 213 + ], + "blocks": [ + { + "bbox": [ + 440, + 82, + 545, + 213 + ], + "lines": [ + { + "bbox": [ + 440, + 82, + 545, + 213 + ], + "spans": [ + { + "bbox": [ + 440, + 82, + 545, + 213 + ], + "type": "image", + "image_path": "b3e9e489695a4a9946ad9fb6d24d204879d2ca00e03d4c16e01c599e24768049.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 436, + 224, + 547, + 246 + ], + "lines": [ + { + "bbox": [ + 436, + 224, + 547, + 246 + ], + "spans": [ + { + "bbox": [ + 436, + 224, + 547, + 246 + ], + "type": "text", + "content": "Figure 5:Empath- " + }, + { + "bbox": [ + 436, + 224, + 547, + 246 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 436, + 224, + 547, + 246 + ], + "type": "text", + "content": " hand segmentation" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "bbox": [ + 314, + 267, + 559, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 267, + 559, + 300 + ], + "spans": [ + { + "bbox": [ + 314, + 267, + 559, + 300 + ], + "type": "text", + "content": "screen. We apply the colour-based segmentation to the points inside the screen's border (thick orange contour in Figure 3) and the depth-based approach to the points outside." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 314, + 300, + 559, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 300, + 559, + 399 + ], + "spans": [ + { + "bbox": [ + 314, + 300, + 559, + 399 + ], + "type": "text", + "content": "Colour-based segmentation: We adopt the colour-based technique proposed in [41]. The approach tests RGB values to segment foreground (hand) from background, coloured in blue. In our scenario, we target human skin as the foreground. Human skin has a property common in all races: its R value has about twice the value of G and B (" + }, + { + "bbox": [ + 314, + 300, + 559, + 399 + ], + "type": "inline_equation", + "content": "R \\approx 2G \\approx 2B" + }, + { + "bbox": [ + 314, + 300, + 559, + 399 + ], + "type": "text", + "content": "). 
Given the property of human skin, we obtain a formula that discriminates the foreground from the background whose " + }, + { + "bbox": [ + 314, + 300, + 559, + 399 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 314, + 300, + 559, + 399 + ], + "type": "text", + "content": " value is 1 (line 11 in Algorithm 1). " + }, + { + "bbox": [ + 314, + 300, + 559, + 399 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 314, + 300, + 559, + 399 + ], + "type": "text", + "content": " is a user-tunable threshold which allows it to adapt to different lighting conditions." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 314, + 399, + 559, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 399, + 559, + 464 + ], + "spans": [ + { + "bbox": [ + 314, + 399, + 559, + 464 + ], + "type": "text", + "content": "However, note that, to enable tracking of the phone, the phone's screen cannot be completely blue, but will need to contain the array of fiducial markers. We tackle both problems simultaneously by using blue (" + }, + { + "bbox": [ + 314, + 399, + 559, + 464 + ], + "type": "inline_equation", + "content": "R = 0" + }, + { + "bbox": [ + 314, + 399, + 559, + 464 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 314, + 399, + 559, + 464 + ], + "type": "inline_equation", + "content": "G = 0" + }, + { + "bbox": [ + 314, + 399, + 559, + 464 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 314, + 399, + 559, + 464 + ], + "type": "inline_equation", + "content": "B = 1" + }, + { + "bbox": [ + 314, + 399, + 559, + 464 + ], + "type": "text", + "content": ") to colour the markers, over a cyan (" + }, + { + "bbox": [ + 314, + 399, + 559, + 464 + ], + "type": "inline_equation", + "content": "R = 0" + }, + { + "bbox": [ + 314, + 399, + 559, + 464 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 314, + 399, + 559, + 464 + ], + "type": "inline_equation", + "content": "G = 1" + }, + { + "bbox": [ + 314, + 399, + 559, + 464 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 314, + 399, + 559, + 464 + ], + "type": "inline_equation", + "content": "B = 1" + }, + { + "bbox": [ + 314, + 399, + 559, + 464 + ], + "type": "text", + "content": ") background. Here we modified only " + }, + { + "bbox": [ + 314, + 399, + 559, + 464 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 314, + 399, + 559, + 464 + ], + "type": "text", + "content": " value, which is unused in the colour-based segmentation." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 464, + 559, + 519 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 464, + 559, + 519 + ], + "spans": [ + { + "bbox": [ + 313, + 464, + 559, + 519 + ], + "type": "text", + "content": "Points outside the smartphone's screen are segmented using the depth-based approach. After identifying the points corresponding to the user's hand, the system translates these points to 3-D coordinates in the camera's coordinate system, using the associated depth values." 
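A direct NumPy transcription of Algorithm 1 may make the two cases easier to follow. This is a sketch, not the paper's code: it assumes the RGB-D frame arrives as separate colour (RGB in [0, 1]) and per-pixel 3-D position arrays, that a boolean mask marking the screen region has been precomputed from the tracked phone pose, and it simplifies the 3-D region of interest to a box around the phone centre (the paper's ROI extends 6 cm beyond the phone's edges and 2 cm above its screen).

```python
import numpy as np

def segment_hand(rgb, xyz, in_screen, R, T, tau=0.5,
                 roi_half=0.10, roi_depth=0.02):
    """Sketch of Algorithm 1 (hand segmentation).

    rgb       -- HxWx3 float array, colour in [0, 1]
    xyz       -- HxWx3 float array, per-pixel 3-D position (camera coords)
    in_screen -- HxW bool array, True where the pixel lies on the screen
    R, T      -- phone rotation (3x3) and translation (3,) from solvePnP
    """
    # Case A (over the blue/cyan screen): skin has R ~ 2G ~ 2B, so a low
    # blue value plus a moderate red value flags the hand (line 11).
    red, blue = rgb[..., 0], rgb[..., 2]
    case_a = (1.0 - blue) + 0.5 * red > tau

    # Case B (outside the screen): move each point into the phone's
    # coordinate frame, R^-1 (p - T), and test it against the ROI
    # (lines 14-15). For a rotation matrix, R^-1 == R^T.
    pos_phone = ((xyz.reshape(-1, 3) - T) @ R).reshape(xyz.shape)
    case_b = ((np.abs(pos_phone[..., 0]) < roi_half) &
              (np.abs(pos_phone[..., 1]) < roi_half) &
              (np.abs(pos_phone[..., 2]) < roi_depth))

    return np.where(in_screen, case_a, case_b)  # the fgMask of line 18
```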
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 315, + 529, + 541, + 542 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 529, + 541, + 542 + ], + "spans": [ + { + "bbox": [ + 315, + 529, + 541, + 542 + ], + "type": "text", + "content": "4.3 Rendering the hand in the virtual world" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 313, + 544, + 559, + 709 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 544, + 559, + 709 + ], + "spans": [ + { + "bbox": [ + 313, + 544, + 559, + 709 + ], + "type": "text", + "content": "After detecting the hand segment, the Empath-D system renders it in the virtual world. The system passes the tracked hands to the Virtual World Renderer, sharing the (i) 3D structure of the hands (surface mesh), (ii) colour image of the RGB-D frame (texture), and (iii) mapping between the surface mesh and the colour image (UV map). In common rendering engines (e.g. Unity), the 3D structure of the hand is represented by a triangle mesh-i.e., a set of vertices, constituting individual small triangles. The mesh is rendered at the same location as the user's hand in the real world. As the user's hand is localised in the coordinates of the RGB-D depth camera, the location is offset by an additional depth value (7cm in our implementation), to reflect the additional distance between the centre of the user's eyes and the depth camera. An important characteristic of our algorithm is that we render the actual image of the user's hands over this triangle mesh. Figure 4 illustrates the Delaunay" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 57, + 282, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 57, + 282, + 69 + ], + "spans": [ + { + "bbox": [ + 52, + 57, + 282, + 69 + ], + "type": "text", + "content": "Empath-D: VR-based Empathetic App Design for Accessibility" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 374, + 58, + 559, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 374, + 58, + 559, + 69 + ], + "spans": [ + { + "bbox": [ + 374, + 58, + 559, + 69 + ], + "type": "text", + "content": "MobiSys '18, June 10-15, 2018, Munich, Germany" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 87, + 175, + 99 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 87, + 175, + 99 + ], + "spans": [ + { + "bbox": [ + 69, + 87, + 175, + 99 + ], + "type": "text", + "content": "Empath-D Dashboard" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 114, + 200, + 124 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 114, + 200, + 124 + ], + "spans": [ + { + "bbox": [ + 70, + 114, + 200, + 124 + ], + "type": "text", + "content": "cataract (blur and contrast reduction)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 131, + 93, + 137 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 131, + 93, + 137 + ], + "spans": [ + { + "bbox": [ + 70, + 131, + 93, + 137 + ], + "type": "text", + "content": "enabled*" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 144, + 97, + 150 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 144, + 97, + 150 + ], + "spans": [ + { + "bbox": [ + 70, + 144, + 97, + 150 + ], + "type": "text", + "content": "enabled" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 156, + 107, + 163 + ], + 
"type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 156, + 107, + 163 + ], + "spans": [ + { + "bbox": [ + 70, + 156, + 107, + 163 + ], + "type": "text", + "content": "Blur intensity*" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 178, + 79, + 184 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 178, + 79, + 184 + ], + "spans": [ + { + "bbox": [ + 70, + 178, + 79, + 184 + ], + "type": "text", + "content": "0.1" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 191, + 141, + 198 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 191, + 141, + 198 + ], + "spans": [ + { + "bbox": [ + 70, + 191, + 141, + 198 + ], + "type": "text", + "content": "Contrast reduction intensity" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 70, + 213, + 75, + 219 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 213, + 75, + 219 + ], + "spans": [ + { + "bbox": [ + 70, + 213, + 75, + 219 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 50, + 286, + 295, + 341 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 286, + 295, + 341 + ], + "spans": [ + { + "bbox": [ + 50, + 286, + 295, + 341 + ], + "type": "text", + "content": "triangulation of a set of points. The mesh is combined with the hand's image (Figure 5), and rendered in the VR display. Extracting and rendering the actual image of the user's finger enhances the immersive feeling of real-life smartphone navigation in the virtual world." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 50, + 342, + 295, + 507 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 342, + 295, + 507 + ], + "spans": [ + { + "bbox": [ + 50, + 342, + 295, + 507 + ], + "type": "text", + "content": "The complexity of the mesh-i.e., the number of vertices (or triangles) in the rendered hand-is an important parameter in the rendering process. A larger number of vertices captures the contours of the hand more precisely, resulting in a more life-like image. However, this also results in added rendering latency in the rendering engine. To support the twin objectives of low-latency and life-like rendering, we utilise a sub-sampling technique to construct the mesh. Specifically, Empath-D preserves all the points on the edges of the segment, to preserve the precise contours of the hand. However, it performs a 32-fold downsampling of the interior points (prior to constructing the Delaunay triangulation), along both the row and column axes, to reduce the computational time significantly, without materially affecting the reconstructed hand image. We shall show, in Section 6, how our prototype Empath-D implementation uses this technique to achieve our twin objectives." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 51, + 518, + 213, + 529 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 518, + 213, + 529 + ], + "spans": [ + { + "bbox": [ + 51, + 518, + 213, + 529 + ], + "type": "text", + "content": "5 IMPAIRMENT SIMULATION" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 50, + 533, + 295, + 577 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 533, + 295, + 577 + ], + "spans": [ + { + "bbox": [ + 50, + 533, + 295, + 577 + ], + "type": "text", + "content": "Empath-D aims to enable evaluation of the usability of app designs under visual, auditory and haptic impairment simulation. 
Realistic simulation of various impairments in the VR world is the essential requirement to achieve this goal." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 50, + 577, + 295, + 708 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 577, + 295, + 708 + ], + "spans": [ + { + "bbox": [ + 50, + 577, + 295, + 708 + ], + "type": "text", + "content": "There has been a thread of research to simulate impairments through physical simulator devices [1, 13, 29, 39, 49]. For instance, Zimmerman et al. use goggles and enclosing materials to simulate low vision impairments [49]. These hardware simulators generalise the impairment of interest and enable simulation of specific aspects of the impairment pathology rather than emulate exactly how an impairment is. However, impairments can vary greatly between individuals. For instance, glaucoma generally progresses in deterioration from the periphery towards the centre of vision, but in reality, it comes in different shapes and severity, affecting usability of applications in different ways. Existing physical impairment simulators simply approximate this as a central circle of" + } + ] + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 320, + 82, + 433, + 192 + ], + "blocks": [ + { + "bbox": [ + 50, + 237, + 294, + 258 + ], + "lines": [ + { + "bbox": [ + 50, + 237, + 294, + 258 + ], + "spans": [ + { + "bbox": [ + 50, + 237, + 294, + 258 + ], + "type": "text", + "content": "Figure 6: Screenshot of Empath-D impairment configuration dashboard" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 320, + 82, + 433, + 192 + ], + "lines": [ + { + "bbox": [ + 320, + 82, + 433, + 192 + ], + "spans": [ + { + "bbox": [ + 320, + 82, + 433, + 192 + ], + "type": "image", + "image_path": "b377111a3890b5b21d82f0ce87f6713e12d99dbe61914f2f616d4f3b1f803405.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 440, + 82, + 553, + 192 + ], + "blocks": [ + { + "bbox": [ + 440, + 82, + 553, + 192 + ], + "lines": [ + { + "bbox": [ + 440, + 82, + 553, + 192 + ], + "spans": [ + { + "bbox": [ + 440, + 82, + 553, + 192 + ], + "type": "image", + "image_path": "11e5b5ede7aadf764283bae50216204001ff4629a008d7bccfe665359f2a217c.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 314, + 202, + 558, + 224 + ], + "lines": [ + { + "bbox": [ + 314, + 202, + 558, + 224 + ], + "spans": [ + { + "bbox": [ + 314, + 202, + 558, + 224 + ], + "type": "text", + "content": "Figure 7: Simulated cataract (left) and simulated glaucoma (right)" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + } + ], + "index": 17 + }, + { + "bbox": [ + 314, + 261, + 559, + 327 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 261, + 559, + 327 + ], + "spans": [ + { + "bbox": [ + 314, + 261, + 559, + 327 + ], + "type": "text", + "content": "clarity, with blur through to the periphery. Empath-D is advantageous over existing physical simulators in the following ways, it allows: 1) impairments to be customised, 2) simultaneous manifestation of multiple impairments, 3) the addition of new impairments easily. Figure 6 shows the web interface for designers to customise impairments for the target user group." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 315, + 342, + 498, + 354 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 342, + 498, + 354 + ], + "spans": [ + { + "bbox": [ + 315, + 342, + 498, + 354 + ], + "type": "text", + "content": "5.1 Simulating Visual Impairments" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 313, + 356, + 559, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 356, + 559, + 498 + ], + "spans": [ + { + "bbox": [ + 313, + 356, + 559, + 498 + ], + "type": "text", + "content": "Vision is the dominant sensory system by which humans perceive the world, and is a key focus for Empath-D. Vision impairment is one of the most common causes of accessibility problems that comes with age. Common vision impairments include cataracts, glaucoma, and age-related macular degeneration. Such vision impairments present as reduced visual acuity, loss of central/peripheral vision, or decreased contrast sensitivity. It is widely studied that these symptoms can affect the interaction with various desktop and mobile applications; for example, humans use peripheral vision to pre-scan text ahead of his/her point of focus. As the peripheral vision narrows, the scanning becomes less effective, which slows reading [23]. In this work, we examine and simulate two commonly found visual impairments - cataracts and glaucoma." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 313, + 499, + 559, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 499, + 559, + 597 + ], + "spans": [ + { + "bbox": [ + 313, + 499, + 559, + 597 + ], + "type": "text", + "content": "Our approach is to apply an image effect at the \"eye\" (i.e., a camera pair of view renderers) of the VR scene. From this camera pair, the image effect will apply to all other objects in the scene (e.g., smartphone, fingers, scene), just as how impaired users would experience it. We employed various image filters for different impairments, which 1) provide realism of impairments to help designers to find out usability issues and take corrective actions, and 2) have small computational overhead not to add noticeable delays to our entire emulation." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 313, + 597, + 559, + 708 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 597, + 559, + 708 + ], + "spans": [ + { + "bbox": [ + 313, + 597, + 559, + 708 + ], + "type": "text", + "content": "The approach is flexible and lightweight. Impairment simulator's intensity is configurable at runtime. The image effects are applied at the last stage of the rendering pipeline. Glaucoma presents functionally as a loss in peripheral vision. To simulate glaucoma, we use a vignette with a clear inner circle, blurred inner-outer circle, and black extending outwards from the outer circle (see Figure 7). Cataracts presents functionally as reduced visual acuity and reduced contrast sensitivity. We use a blur filter to simulate reduced visual acuity, and a contrast reduction filter to simulate reduced contrast sensitivity (see Figure 7)." 
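Empath-D realises these effects as shaders in its rendering pipeline (Section 6.5); purely to make their functional form concrete, the following is a NumPy/OpenCV sketch of both filters applied to an HxWx3 rendered frame. The kernel size, contrast factor and vignette radii are illustrative, and a linear falloff stands in for the blurred ring between the glaucoma vignette's inner and outer circles.

```python
import numpy as np
import cv2

def cataract(frame, ksize=21, contrast=0.5):
    """Blur models reduced visual acuity; pulling pixel values towards
    mid-grey models reduced contrast sensitivity."""
    out = cv2.GaussianBlur(frame, (ksize, ksize), 0)
    return (contrast * out + (1.0 - contrast) * 127.0).astype(np.uint8)

def glaucoma(frame, clear_r=0.25, black_r=0.55):
    """Vignette: clear inner circle, fading ring, black periphery."""
    h, w = frame.shape[:2]
    yy, xx = np.mgrid[0:h, 0:w]
    # Radial distance of every pixel from the frame centre, in [0, 1].
    r = np.hypot(xx - w / 2, yy - h / 2) / np.hypot(w / 2, h / 2)
    mask = np.clip((black_r - r) / (black_r - clear_r), 0.0, 1.0)
    return (frame * mask[..., None]).astype(np.uint8)
```

Because both are per-frame image effects on the final rendered view, their intensity parameters can be adjusted at runtime, which is exactly what the dashboard of Figure 6 exposes to designers.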
+ } + ] + } + ], + "index": 23 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 57, + 236, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 57, + 236, + 68 + ], + "spans": [ + { + "bbox": [ + 52, + 57, + 236, + 68 + ], + "type": "text", + "content": "MobiSys '18, June 10-15, 2018, Munich, Germany" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 485, + 58, + 558, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 485, + 58, + 558, + 68 + ], + "spans": [ + { + "bbox": [ + 485, + 58, + 558, + 68 + ], + "type": "text", + "content": "Wonjung Kim et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 63, + 105, + 281, + 186 + ], + "blocks": [ + { + "bbox": [ + 107, + 83, + 239, + 95 + ], + "lines": [ + { + "bbox": [ + 107, + 83, + 239, + 95 + ], + "spans": [ + { + "bbox": [ + 107, + 83, + 239, + 95 + ], + "type": "text", + "content": "Table 1: Hardware of Empath-D" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 63, + 105, + 281, + 186 + ], + "lines": [ + { + "bbox": [ + 63, + 105, + 281, + 186 + ], + "spans": [ + { + "bbox": [ + 63, + 105, + 281, + 186 + ], + "type": "table", + "html": "
<table><tr><td>VR headset</td><td>Samsung Gear VR [5]</td></tr>
<tr><td>VR smartphone</td><td>Samsung Galaxy S7 [4]</td></tr>
<tr><td>RGB-D camera</td><td>Intel RealSense SR300 [20]</td></tr>
<tr><td>PC</td><td>CPU: 4 cores, 3.4 GHz\nRAM: 16 GB\nGPU: GeForce GTX 1080 [32]</td></tr>
<tr><td>Physical IO smartphone</td><td>Samsung Galaxy S5 [40]</td></tr></table>
", + "image_path": "6410f07cb949eca3b22ee97fdcbbad013a03f757c41dd81b3492a8c8d7258e5a.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 50, + 205, + 295, + 304 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 205, + 295, + 304 + ], + "spans": [ + { + "bbox": [ + 50, + 205, + 295, + 304 + ], + "type": "text", + "content": "The functional aspects of vision impairments are straightforward to create in VR, which give Empath-D high extendability to implement other types of visual impairments. While we just described two impairments pertaining to our studies, it is easy to create other impairments such as colour filters to simulate colour blindness. However, we leave the effect of eye movements on impairments as the future work. Since eye-tracking is currently not supported in Empath-D, a user will need to move his head to achieve the same effect." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 51, + 314, + 219, + 327 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 314, + 219, + 327 + ], + "spans": [ + { + "bbox": [ + 51, + 314, + 219, + 327 + ], + "type": "text", + "content": "5.2 Simulating Other Modalities" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 328, + 291, + 339 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 328, + 291, + 339 + ], + "spans": [ + { + "bbox": [ + 50, + 328, + 291, + 339 + ], + "type": "text", + "content": "We discuss how other modalities may be simulated in Empath-D." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 50, + 340, + 295, + 448 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 340, + 295, + 448 + ], + "spans": [ + { + "bbox": [ + 50, + 340, + 295, + 448 + ], + "type": "text", + "content": "Hand Tremors. Hand tremors are a common symptom of Parkinson's disease or Essential tremor and make it hard for one to precisely point on a touchscreen. A hand tremor may be characterised by the frequency and amplitude of oscillatory movement. Since we present virtual representations of the user's hand (i.e., as a 3D mesh) to enable his interaction with the virtual mobile phone, Empath-D similarly perturbs this 3D mesh in VR to create hand tremors. While a user may physically not experience hand movement, the visual perturbation would be sufficient to hinder accurate touch to simulate hand tremors." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 50, + 449, + 298, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 449, + 298, + 548 + ], + "spans": [ + { + "bbox": [ + 50, + 449, + 298, + 548 + ], + "type": "text", + "content": "Hearing Loss. High-frequency hearing loss is a common symptom for the elderly population. People diagnosed with high-frequency hearing loss are unable to hear sounds between " + }, + { + "bbox": [ + 50, + 449, + 298, + 548 + ], + "type": "inline_equation", + "content": "2,000\\mathrm{Hz}" + }, + { + "bbox": [ + 50, + 449, + 298, + 548 + ], + "type": "text", + "content": " and 8,000 Hz. These people often struggle to understand or keep up with daily conversations (missing consonants in higher registers, such as the letters F and S or female voices). 
Empath-D applies a bandpass filter over the output sound of the target application to diminish the sound signals between " + }, + { + "bbox": [ + 50, + 449, + 298, + 548 + ], + "type": "inline_equation", + "content": "2\\mathrm{kHz}" + }, + { + "bbox": [ + 50, + 449, + 298, + 548 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 50, + 449, + 298, + 548 + ], + "type": "inline_equation", + "content": "8\\mathrm{kHz}" + }, + { + "bbox": [ + 50, + 449, + 298, + 548 + ], + "type": "text", + "content": " and plays the filtered audio feed through the VR device." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 51, + 558, + 170, + 569 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 558, + 170, + 569 + ], + "spans": [ + { + "bbox": [ + 51, + 558, + 170, + 569 + ], + "type": "text", + "content": "6 IMPLEMENTATION" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 51, + 574, + 128, + 585 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 574, + 128, + 585 + ], + "spans": [ + { + "bbox": [ + 51, + 574, + 128, + 585 + ], + "type": "text", + "content": "6.1 Hardware" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 50, + 588, + 295, + 709 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 588, + 295, + 709 + ], + "spans": [ + { + "bbox": [ + 50, + 588, + 295, + 709 + ], + "type": "text", + "content": "We implemented our current Empath-D prototype using the hardware described in Table 1. We used the Samsung Gear VR fitted with the Samsung Galaxy S7 as the VR headset. We used the Intel RealSense SR300 RGB-D camera for finger tracking, selecting this among alternatives as: 1) its small size and low weight allowed us to easily attach it to the VR headset, and 2) its minimum sensing range is low enough to permit hand tracking at a distance of " + }, + { + "bbox": [ + 50, + 588, + 295, + 709 + ], + "type": "inline_equation", + "content": "30\\mathrm{cm}" + }, + { + "bbox": [ + 50, + 588, + 295, + 709 + ], + "type": "text", + "content": ". We employed the Samsung Galaxy S5 as the physical I/O device, and a powerful laptop (4 core 3.4 GHz CPU, 16GB RAM) as the intermediary device. The choice of the VR headset itself was deliberate. 
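Returning briefly to the hearing-loss simulation of Section 5.2: since the stated goal is to diminish the 2-8 kHz band, a band-stop design achieves the described effect. Below is a minimal SciPy sketch, with the sample rate and filter order as assumptions rather than values from the paper.

```python
import numpy as np
from scipy.signal import butter, lfilter

FS = 44100  # assumed sample rate of the app's audio feed (Hz)

# Butterworth band-stop attenuating 2-8 kHz, the range lost to
# high-frequency hearing loss (the order of 4 is illustrative).
b, a = butter(N=4, Wn=[2000, 8000], btype='bandstop', fs=FS)

def simulate_hearing_loss(audio):
    """audio: 1-D array of samples; returns the filtered feed to be
    played through the VR device's audio output."""
    return lfilter(b, a, audio)

# Quick check: a 3 kHz tone is strongly attenuated, 500 Hz passes.
t = np.arange(FS) / FS
print(np.abs(simulate_hearing_loss(np.sin(2 * np.pi * 3000 * t))).max())
print(np.abs(simulate_hearing_loss(np.sin(2 * np.pi * 500 * t))).max())
```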
We chose a Samsung Gear VR headset (an untethered" + } + ] + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 375, + 84, + 501, + 198 + ], + "blocks": [ + { + "bbox": [ + 375, + 84, + 501, + 198 + ], + "lines": [ + { + "bbox": [ + 375, + 84, + 501, + 198 + ], + "spans": [ + { + "bbox": [ + 375, + 84, + 501, + 198 + ], + "type": "image", + "image_path": "1dcb64b45b7edead9f0980680ddcb16e04ceb7b4ff22f89c3ac8e10075db7d63.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 314, + 211, + 560, + 244 + ], + "lines": [ + { + "bbox": [ + 314, + 211, + 560, + 244 + ], + "spans": [ + { + "bbox": [ + 314, + 211, + 560, + 244 + ], + "type": "text", + "content": "Figure 8: Rendering frame rate under varying virtual display resolution (width : height = 9 : 16, default resolution of Android emulator is 1080x1920)" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "bbox": [ + 314, + 266, + 559, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 266, + 559, + 323 + ], + "spans": [ + { + "bbox": [ + 314, + 266, + 559, + 323 + ], + "type": "text", + "content": "smartphone-powered VR device) over more powerful PC-tethered VR devices such as the HTC Vive or Oculus Rift. This was mainly because PC-tethered devices such as HTC Vive use IR lasers to localise the headset, which interferes with the IR laser emitted by the RGB-D camera used for depth sensing in hand tracking." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 315, + 332, + 484, + 346 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 332, + 484, + 346 + ], + "spans": [ + { + "bbox": [ + 315, + 332, + 484, + 346 + ], + "type": "text", + "content": "6.2 Rendering an Emulated App" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 347, + 560, + 478 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 347, + 560, + 478 + ], + "spans": [ + { + "bbox": [ + 313, + 347, + 560, + 478 + ], + "type": "text", + "content": "We used empirical studies to determine an appropriate screen resolution and frame rate to render the emulated app (and the smartphone) in the VR headset. Empath-D obtains screenshots of its mobile emulator using the Android virtual display [35] and transmits these screenshots over WiFi to the Gear VR device. The overhead of transmitting and rendering these emulated screenshots is proportional to their resolution. The default 1080p resolution could sustain a frame rate of only 18 fps, which causes visible jerkiness. To reduce this overhead, we reduced the resolution (using setDisplayProjection() method), and applied differential transmissions, sending a screenshot only when the emulated app's display changes." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 479, + 560, + 598 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 479, + 560, + 598 + ], + "spans": [ + { + "bbox": [ + 313, + 479, + 560, + 598 + ], + "type": "text", + "content": "Figure 8 shows the experimental results on the tradeoff between the resolution and the rendering frame rate, obtained while playing a video to ensure continuous change of the screen content. 
The frame rate saturates at " + }, + { + "bbox": [ + 313, + 479, + 560, + 598 + ], + "type": "inline_equation", + "content": "57~\\text{fps}" + }, + { + "bbox": [ + 313, + 479, + 560, + 598 + ], + "type": "text", + "content": ", at a screen resolution of " + }, + { + "bbox": [ + 313, + 479, + 560, + 598 + ], + "type": "inline_equation", + "content": "485\\times 863" + }, + { + "bbox": [ + 313, + 479, + 560, + 598 + ], + "type": "text", + "content": ". Moreover, through another user study (described next) to understand the minimum resolution to read an app's contents, we empirically verified that the participants had no issues in reading the app's content at the resolution of " + }, + { + "bbox": [ + 313, + 479, + 560, + 598 + ], + "type": "inline_equation", + "content": "485\\times 863" + }, + { + "bbox": [ + 313, + 479, + 560, + 598 + ], + "type": "text", + "content": ". Hence, we choose this resolution as our default, although this setting can be modified (e.g., we can pick a higher resolution, and a lower frame rate, for an app with mostly static content)." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 313, + 599, + 560, + 709 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 599, + 560, + 709 + ], + "spans": [ + { + "bbox": [ + 313, + 599, + 560, + 709 + ], + "type": "text", + "content": "If Empath-D displays the virtual smartphone at its original size in the virtual world (portrait position), its display becomes illegible. For example, the Samsung Galaxy S7 (in the Gear VR) has a resolution of " + }, + { + "bbox": [ + 313, + 599, + 560, + 709 + ], + "type": "inline_equation", + "content": "2560 \\times 1440" + }, + { + "bbox": [ + 313, + 599, + 560, + 709 + ], + "type": "text", + "content": " and an " + }, + { + "bbox": [ + 313, + 599, + 560, + 709 + ], + "type": "inline_equation", + "content": "\\approx 101^{\\circ}" + }, + { + "bbox": [ + 313, + 599, + 560, + 709 + ], + "type": "text", + "content": " horizontal field of view yielding a horizontal pixel density of " + }, + { + "bbox": [ + 313, + 599, + 560, + 709 + ], + "type": "inline_equation", + "content": "\\approx 25.3" + }, + { + "bbox": [ + 313, + 599, + 560, + 709 + ], + "type": "text", + "content": " pixels/degree. When a virtual phone is held at " + }, + { + "bbox": [ + 313, + 599, + 560, + 709 + ], + "type": "inline_equation", + "content": "30\\mathrm{cm}" + }, + { + "bbox": [ + 313, + 599, + 560, + 709 + ], + "type": "text", + "content": " away, the horizontal pixel density drops below 25.3 pixels/degree due to downsampling of the virtual phone screen as seen through the VR display. 
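The pixel-density figures in this section can be reproduced with a few lines of arithmetic. The Galaxy S5 numbers below assume a 5.1-inch 16:9 panel, i.e. 1080 horizontal pixels across a 6.35 cm screen width in portrait; the Gear VR line simply follows the paper's 2560 px over ~101° figure.

```python
import math

# Gear VR: the Galaxy S7's 2560 horizontal pixels over a ~101 deg FOV.
print(2560 / 101)                       # ~25.3 pixels/degree in VR

# Physical Galaxy S5 held 30 cm from the eye: 1080 horizontal pixels
# (portrait) across an assumed 6.35 cm wide screen.
width_cm, dist_cm = 6.35, 30.0
angle = 2 * math.degrees(math.atan(width_cm / 2 / dist_cm))
print(1080 / angle)                     # ~89.4 pixels/degree on-device
```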
This presents a problem for viewing the content of the virtual phone - in particular, text - as its pixel density is significantly lower than when viewing a physical" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 57, + 282, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 57, + 282, + 69 + ], + "spans": [ + { + "bbox": [ + 52, + 57, + 282, + 69 + ], + "type": "text", + "content": "Empath-D: VR-based Empathetic App Design for Accessibility" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 374, + 57, + 559, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 374, + 57, + 559, + 69 + ], + "spans": [ + { + "bbox": [ + 374, + 57, + 559, + 69 + ], + "type": "text", + "content": "MobiSys '18, June 10-15, 2018, Munich, Germany" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 62, + 84, + 289, + 189 + ], + "blocks": [ + { + "bbox": [ + 62, + 84, + 289, + 189 + ], + "lines": [ + { + "bbox": [ + 62, + 84, + 289, + 189 + ], + "spans": [ + { + "bbox": [ + 62, + 84, + 289, + 189 + ], + "type": "image", + "image_path": "7dc207ba8205ccdb334a933ebb68c6cfbb0d9d987fb5854960854ec9b06f67ca.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 50, + 201, + 295, + 224 + ], + "lines": [ + { + "bbox": [ + 50, + 201, + 295, + 224 + ], + "spans": [ + { + "bbox": [ + 50, + 201, + 295, + 224 + ], + "type": "text", + "content": "Figure 9: Readable font size of the virtual smartphone at a magnification ratio" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 50, + 243, + 294, + 264 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 243, + 294, + 264 + ], + "spans": [ + { + "bbox": [ + 50, + 243, + 294, + 264 + ], + "type": "text", + "content": "phone. For instance, the Galaxy S5 gives " + }, + { + "bbox": [ + 50, + 243, + 294, + 264 + ], + "type": "inline_equation", + "content": "\\approx 89.4" + }, + { + "bbox": [ + 50, + 243, + 294, + 264 + ], + "type": "text", + "content": " pixels/degree at " + }, + { + "bbox": [ + 50, + 243, + 294, + 264 + ], + "type": "inline_equation", + "content": "30\\mathrm{cm}" + }, + { + "bbox": [ + 50, + 243, + 294, + 264 + ], + "type": "text", + "content": " distance." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 265, + 295, + 397 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 265, + 295, + 397 + ], + "spans": [ + { + "bbox": [ + 50, + 265, + 295, + 397 + ], + "type": "text", + "content": "We tackle this issue by scaling up the virtual phone's size by a factor that ensures that the phone's display text remains legible. To determine this factor, we recruited three participants and asked them to record the minimum readable font sizes, while showing them a virtual smartphone (at a distance of " + }, + { + "bbox": [ + 50, + 265, + 295, + 397 + ], + "type": "inline_equation", + "content": "30~\\mathrm{cm}" + }, + { + "bbox": [ + 50, + 265, + 295, + 397 + ], + "type": "text", + "content": ") with various magnification ratios (increased by 0.1 from 1.0 to 2.7). 
Figure 9 shows that participants could read text with the font size= 12sp (the commonly used minimum font size for mobile apps) for magnification factors " + }, + { + "bbox": [ + 50, + 265, + 295, + 397 + ], + "type": "inline_equation", + "content": "\\geq 1.5" + }, + { + "bbox": [ + 50, + 265, + 295, + 397 + ], + "type": "text", + "content": ". Accordingly, we used 1.5 as the default magnification ratio for the smartphone and its display. We also proportionately scaled up the user's rendered hand. User studies (Section 7) show that users found this configuration highly usable." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 51, + 406, + 200, + 418 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 406, + 200, + 418 + ], + "spans": [ + { + "bbox": [ + 51, + 406, + 200, + 418 + ], + "type": "text", + "content": "6.3 Rendering Virtual Hand" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 50, + 421, + 295, + 631 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 421, + 295, + 631 + ], + "spans": [ + { + "bbox": [ + 50, + 421, + 295, + 631 + ], + "type": "text", + "content": "As discussed in Section 4.3, the rendering latency of the virtual hand is proportional to the number of vertices in the Delaunay triangulation-based mesh. To reduce the latency, we apply a nonuniform sampling approach. Specifically, Empath-D preserves all the points on the edges of the segment, to preserve the precise contours of the hand. However, it performs a downsampling of the interior points (prior to constructing the Delaunay triangulation), along both the " + }, + { + "bbox": [ + 50, + 421, + 295, + 631 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 50, + 421, + 295, + 631 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 50, + 421, + 295, + 631 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 50, + 421, + 295, + 631 + ], + "type": "text", + "content": " axes, to reduce the computational time significantly, without materially affecting the reconstructed hand image. We empirically determined the sampling rate " + }, + { + "bbox": [ + 50, + 421, + 295, + 631 + ], + "type": "inline_equation", + "content": "X" + }, + { + "bbox": [ + 50, + 421, + 295, + 631 + ], + "type": "text", + "content": ", by varying " + }, + { + "bbox": [ + 50, + 421, + 295, + 631 + ], + "type": "inline_equation", + "content": "X" + }, + { + "bbox": [ + 50, + 421, + 295, + 631 + ], + "type": "text", + "content": " and measuring both (i) the processing latency and (ii) the SSIM [12, 46] (Structural SIMilarity; a metric of perceived image quality) of the hand images, using 200 RGB-D frames. Figure 10 shows the results. Without any subsampling (" + }, + { + "bbox": [ + 50, + 421, + 295, + 631 + ], + "type": "inline_equation", + "content": "X = 0\\%" + }, + { + "bbox": [ + 50, + 421, + 295, + 631 + ], + "type": "text", + "content": "), the rendering latency is 311.1 msec, which is too high for our responsiveness goal. We empirically downsample the internal pixels by a factor of 32 (" + }, + { + "bbox": [ + 50, + 421, + 295, + 631 + ], + "type": "inline_equation", + "content": "X = 99.9\\%" + }, + { + "bbox": [ + 50, + 421, + 295, + 631 + ], + "type": "text", + "content": "), i.e., choosing every " + }, + { + "bbox": [ + 50, + 421, + 295, + 631 + ], + "type": "inline_equation", + "content": "32^{nd}" + }, + { + "bbox": [ + 50, + 421, + 295, + 631 + ], + "type": "text", + "content": " pixel on the grid. 
This results in a latency of 26.9 msec, while keeping the SSIM = 0.976, a level indistinguishable with the original as perceived by a human." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 51, + 639, + 200, + 651 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 639, + 200, + 651 + ], + "spans": [ + { + "bbox": [ + 51, + 639, + 200, + 651 + ], + "type": "text", + "content": "6.4 Environment Emulation" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 50, + 654, + 295, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 654, + 295, + 710 + ], + "spans": [ + { + "bbox": [ + 50, + 654, + 295, + 710 + ], + "type": "text", + "content": "To enable holistic evaluation of app interactions, Empath-D emulates not just the virtual phone, but the entire virtual world as well. In our current implementation, we emulated a crowded Urban Street environment, which includes crosswalks, traffic lights, pedestrians and commonplace roadside obstacles. To further mimic real-world" + } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 335, + 84, + 542, + 192 + ], + "blocks": [ + { + "bbox": [ + 335, + 84, + 542, + 192 + ], + "lines": [ + { + "bbox": [ + 335, + 84, + 542, + 192 + ], + "spans": [ + { + "bbox": [ + 335, + 84, + 542, + 192 + ], + "type": "image", + "image_path": "ed586f433128be2511fc2fdefb49a6de472fe84bdb9ff54f955321bebd4e5d7b.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 314, + 204, + 559, + 225 + ], + "lines": [ + { + "bbox": [ + 314, + 204, + 559, + 225 + ], + "spans": [ + { + "bbox": [ + 314, + 204, + 559, + 225 + ], + "type": "text", + "content": "Figure 10: Rendering latency vs. image quality of the virtual hand" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 245, + 559, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 245, + 559, + 300 + ], + "spans": [ + { + "bbox": [ + 313, + 245, + 559, + 300 + ], + "type": "text", + "content": "movement, our implementation allows the user to navigate the virtual world by (i) rotating her head (this uses the head tracking ability of the VR device), and (ii) by 'walking in place', using the technique proposed in [45] as this does not require any additional hardware on the VR device." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 315, + 309, + 405, + 322 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 309, + 405, + 322 + ], + "spans": [ + { + "bbox": [ + 315, + 309, + 405, + 322 + ], + "type": "text", + "content": "6.5 VR Manager" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 323, + 559, + 412 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 323, + 559, + 412 + ], + "spans": [ + { + "bbox": [ + 313, + 323, + 559, + 412 + ], + "type": "text", + "content": "This component currently executes on the VR smartphone, and is responsible for combining the output of the various components (Hand Tracker, Phone Tracker and Virtual Phone) in the virtual world. This component, implemented as a Unity application, renders these various components. This component is also responsible for applying the impairments on the output of the virtual world. The image effects simulating low vision impairments are defined as a script, Shaders in Unity." 
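Returning to the hand-mesh construction of Sections 4.3 and 6.3, the non-uniform subsampling can be sketched as below. This is an illustration, not the Unity implementation: it assumes the foreground mask from Algorithm 1 and uses OpenCV for the contour and SciPy for the Delaunay triangulation. A real implementation would additionally drop triangles whose centroid falls outside the mask (Delaunay triangulates the convex hull) and derive the UV map by normalising the vertices' pixel coordinates against the frame size.

```python
import numpy as np
import cv2
from scipy.spatial import Delaunay

def hand_mesh(fg_mask, stride=32):
    """Triangle mesh for the hand from a boolean foreground mask.

    Every contour point is kept, preserving the precise hand outline,
    while interior points are taken only once per stride x stride cell:
    the 32-fold downsampling that cut rendering latency from ~311 ms
    to ~27 ms while keeping SSIM at 0.976.
    """
    mask = fg_mask.astype(np.uint8)
    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_NONE)
    edge_pts = np.vstack([c.reshape(-1, 2) for c in contours])

    # Sparse interior samples on a coarse grid (x = column, y = row).
    coarse = np.zeros_like(mask)
    coarse[::stride, ::stride] = mask[::stride, ::stride]
    ys, xs = np.nonzero(coarse)
    grid_pts = np.column_stack([xs, ys])

    pts = np.unique(np.vstack([edge_pts, grid_pts]), axis=0).astype(float)
    tri = Delaunay(pts)           # mesh vertices + triangle index list
    return pts, tri.simplices
```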
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 315, + 421, + 405, + 432 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 421, + 405, + 432 + ], + "spans": [ + { + "bbox": [ + 315, + 421, + 405, + 432 + ], + "type": "text", + "content": "7 EVALUATION" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 435, + 559, + 535 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 435, + 559, + 535 + ], + "spans": [ + { + "bbox": [ + 313, + 435, + 559, + 535 + ], + "type": "text", + "content": "We now present a mix of system and user experiments to evaluate the performance and efficacy of our Empath-D implementation. Besides micro-benchmark studies, we conducted two experiments to capture user interaction with Empath-D. In Experiment 1, we examine the performance of Empath-D vs. a real-world smartphone, in the absence of any impairments. In Experiment 2, we consider an impairment-augmented version of Empath-D, comparing the performance of users against the use of commercial impairment simulation hardware." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 315, + 544, + 514, + 570 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 544, + 514, + 570 + ], + "spans": [ + { + "bbox": [ + 315, + 544, + 514, + 570 + ], + "type": "text", + "content": "7.1 Micro-benchmark Performance of Empath-D" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 313, + 571, + 559, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 571, + 559, + 605 + ], + "spans": [ + { + "bbox": [ + 313, + 571, + 559, + 605 + ], + "type": "text", + "content": "We measured the overall latency of Empath-D, both in terms of the delay in reflecting touch interactions in the virtual world and in terms of the hand tracking delay." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 313, + 611, + 559, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 611, + 559, + 710 + ], + "spans": [ + { + "bbox": [ + 313, + 611, + 559, + 710 + ], + "type": "text", + "content": "7.1.1 End-to-end Latency of Touch Interaction. As a measure of the overall responsiveness of Empath-D, we computed the latency between a touch input, on the physical smartphone, and the resulting change in the content of the virtual smartphone, rendered in the VR display. To measure this, we utilised a high framerate camera (operating at 240 fps) to concurrently record both the screen of the physical smartphone and the virtual phone (displayed in the VR). The phone screen is coloured green initially, and was programmed to turn red as soon as it received a touch input. We repeated the" + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 57, + 237, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 57, + 237, + 69 + ], + "spans": [ + { + "bbox": [ + 52, + 57, + 237, + 69 + ], + "type": "text", + "content": "MobiSys '18, June 10-15, 2018, Munich, Germany" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 485, + 58, + 558, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 485, + 58, + 558, + 68 + ], + "spans": [ + { + "bbox": [ + 485, + 58, + 558, + 68 + ], + "type": "text", + "content": "Wonjung Kim et al." 
+ } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 70, + 84, + 275, + 178 + ], + "blocks": [ + { + "bbox": [ + 70, + 84, + 275, + 178 + ], + "lines": [ + { + "bbox": [ + 70, + 84, + 275, + 178 + ], + "spans": [ + { + "bbox": [ + 70, + 84, + 275, + 178 + ], + "type": "image", + "image_path": "f4fadf609d2c2cfe536a1d5dee2467428b592246da75a7f0e69d720eb5161084.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 77, + 190, + 269, + 202 + ], + "lines": [ + { + "bbox": [ + 77, + 190, + 269, + 202 + ], + "spans": [ + { + "bbox": [ + 77, + 190, + 269, + 202 + ], + "type": "text", + "content": "Figure 11: Overhead of impairment simulation" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 50, + 226, + 294, + 269 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 226, + 294, + 269 + ], + "spans": [ + { + "bbox": [ + 50, + 226, + 294, + 269 + ], + "type": "text", + "content": "measurement 23 times, capturing (via the video frames) the time gap between (i) the physical smartphone screen turning red and (ii) the virtual smartphone turning red in the VR display. The end-to-end latency is 237.70 msec (" + }, + { + "bbox": [ + 50, + 226, + 294, + 269 + ], + "type": "inline_equation", + "content": "SD = 20.43" + }, + { + "bbox": [ + 50, + 226, + 294, + 269 + ], + "type": "text", + "content": ")." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 270, + 295, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 270, + 295, + 456 + ], + "spans": [ + { + "bbox": [ + 50, + 270, + 295, + 456 + ], + "type": "text", + "content": "By monitoring the intermediary computer, we obtained the breakdown of this delay: (i) smartphone responsiveness (the time from the user touching the screen till the time the phone transmits the touch event to the computer) " + }, + { + "bbox": [ + 50, + 270, + 295, + 456 + ], + "type": "inline_equation", + "content": "= 0.3" + }, + { + "bbox": [ + 50, + 270, + 295, + 456 + ], + "type": "text", + "content": " msec " + }, + { + "bbox": [ + 50, + 270, + 295, + 456 + ], + "type": "inline_equation", + "content": "(SD = 0.16)" + }, + { + "bbox": [ + 50, + 270, + 295, + 456 + ], + "type": "text", + "content": "; (ii) computer emulation responsiveness (the time from receiving the touch event till the time the screenshot of the modified display is sent to the VR device) " + }, + { + "bbox": [ + 50, + 270, + 295, + 456 + ], + "type": "inline_equation", + "content": "= 141.37" + }, + { + "bbox": [ + 50, + 270, + 295, + 456 + ], + "type": "text", + "content": " msec " + }, + { + "bbox": [ + 50, + 270, + 295, + 456 + ], + "type": "inline_equation", + "content": "(SD = 6.6)" + }, + { + "bbox": [ + 50, + 270, + 295, + 456 + ], + "type": "text", + "content": ", and (iii) the VR responsiveness (the time from receiving the screenshot till it is rendered on the VR display) " + }, + { + "bbox": [ + 50, + 270, + 295, + 456 + ], + "type": "inline_equation", + "content": "= 10.46" + }, + { + "bbox": [ + 50, + 270, + 295, + 456 + ], + "type": "text", + "content": " msec " + }, + { + "bbox": [ + 50, + 270, + 295, + 456 + ], + "type": "inline_equation", + "content": "(SD = 8.36)" + }, + { + "bbox": [ + 50, + 270, + 295, + 456 + ], + "type": "text", + "content": ". 
The remaining latency (" + }, + { + "bbox": [ + 50, + 270, + 295, + 456 + ], + "type": "inline_equation", + "content": "\\approx 87" + }, + { + "bbox": [ + 50, + 270, + 295, + 456 + ], + "type": "text", + "content": " msec) can be attributed as the WiFi network latency. These micro-measurements suggest that the default Android emulator used in our studies was the dominant component of the latency. The default Android emulator is known to be fairly slow, and multiple third party emulators (e.g., Genymotion [16]) are reported to provide significantly lower latency. Accordingly, we anticipate that this overall latency can be reduced to " + }, + { + "bbox": [ + 50, + 270, + 295, + 456 + ], + "type": "inline_equation", + "content": "\\leq 150" + }, + { + "bbox": [ + 50, + 270, + 295, + 456 + ], + "type": "text", + "content": " msec, without any significant architectural modification of Empath-D." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 464, + 295, + 661 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 464, + 295, + 661 + ], + "spans": [ + { + "bbox": [ + 50, + 464, + 295, + 661 + ], + "type": "text", + "content": "7.1.2 End-to-end Latency of Virtual Hand. We also evaluated the latency between the physical movement of the user's hand and the rendering of this movement in the VR display. To capture this time difference, we displayed a small circle, at a specific point on the display, on both the smartphone and the virtual phone. Users were instructed to swipe a finger on the screen to reach the circle. We measured, over 20 experiments, the time (no. of frames from the previously used high framerate camera) between the occlusion of the circle on the physical phone and the resulting occlusion in the virtual phone, computing an average latency of " + }, + { + "bbox": [ + 50, + 464, + 295, + 661 + ], + "type": "inline_equation", + "content": "117.46\\mathrm{msec}" + }, + { + "bbox": [ + 50, + 464, + 295, + 661 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 50, + 464, + 295, + 661 + ], + "type": "inline_equation", + "content": "SD = 20.44" + }, + { + "bbox": [ + 50, + 464, + 295, + 661 + ], + "type": "text", + "content": "). 
Additionally, we measured the component delays of this rendering process as: (i) reading an RGBD frame: " + }, + { + "bbox": [ + 50, + 464, + 295, + 661 + ], + "type": "inline_equation", + "content": "4.90\\mathrm{msec}" + }, + { + "bbox": [ + 50, + 464, + 295, + 661 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 50, + 464, + 295, + 661 + ], + "type": "inline_equation", + "content": "SD = 0.58" + }, + { + "bbox": [ + 50, + 464, + 295, + 661 + ], + "type": "text", + "content": "); (ii) phone tracking: " + }, + { + "bbox": [ + 50, + 464, + 295, + 661 + ], + "type": "inline_equation", + "content": "4.56\\mathrm{msec}" + }, + { + "bbox": [ + 50, + 464, + 295, + 661 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 50, + 464, + 295, + 661 + ], + "type": "inline_equation", + "content": "SD = 0.25" + }, + { + "bbox": [ + 50, + 464, + 295, + 661 + ], + "type": "text", + "content": "); (iii) hand tracking: " + }, + { + "bbox": [ + 50, + 464, + 295, + 661 + ], + "type": "inline_equation", + "content": "8.0\\mathrm{msec}" + }, + { + "bbox": [ + 50, + 464, + 295, + 661 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 50, + 464, + 295, + 661 + ], + "type": "inline_equation", + "content": "SD = 1.58" + }, + { + "bbox": [ + 50, + 464, + 295, + 661 + ], + "type": "text", + "content": "), and (iv) the VR responsiveness (the time from receiving the hand mesh till it is rendered on the VR display): " + }, + { + "bbox": [ + 50, + 464, + 295, + 661 + ], + "type": "inline_equation", + "content": "26.99\\mathrm{msec}" + }, + { + "bbox": [ + 50, + 464, + 295, + 661 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 50, + 464, + 295, + 661 + ], + "type": "inline_equation", + "content": "SD = 5.22" + }, + { + "bbox": [ + 50, + 464, + 295, + 661 + ], + "type": "text", + "content": "). The remaining latency, attributable to the WiFi network, is " + }, + { + "bbox": [ + 50, + 464, + 295, + 661 + ], + "type": "inline_equation", + "content": "\\approx 73\\mathrm{msec}" + }, + { + "bbox": [ + 50, + 464, + 295, + 661 + ], + "type": "text", + "content": ", consistent with the measurements reported above." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 51, + 672, + 275, + 685 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 672, + 275, + 685 + ], + "spans": [ + { + "bbox": [ + 51, + 672, + 275, + 685 + ], + "type": "text", + "content": "7.2 Study Design for Usability Experiments" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 50, + 686, + 295, + 709 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 686, + 295, + 709 + ], + "spans": [ + { + "bbox": [ + 50, + 686, + 295, + 709 + ], + "type": "text", + "content": "We then conducted user studies on the usability and real-world fidelity of our Empath-D implementation. The user study (approved" + } + ] + } + ], + "index": 8 + }, + { + "type": "table", + "bbox": [ + 328, + 105, + 544, + 196 + ], + "blocks": [ + { + "bbox": [ + 328, + 83, + 545, + 94 + ], + "lines": [ + { + "bbox": [ + 328, + 83, + 545, + 94 + ], + "spans": [ + { + "bbox": [ + 328, + 83, + 545, + 94 + ], + "type": "text", + "content": "Table 2: Study Tasks and Conditions in Experiment 1" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 328, + 105, + 544, + 196 + ], + "lines": [ + { + "bbox": [ + 328, + 105, + 544, + 196 + ], + "spans": [ + { + "bbox": [ + 328, + 105, + 544, + 196 + ], + "type": "table", + "html": "
Task | Condition | Impairment | Simulator Type | Environment
T1-T4 | A | none | none | Real
 | B | Cataracts | Physical | Real
 | C | none | none | Virtual
 | D | Cataracts | Virtual | Virtual
 | E | Glaucoma | Physical | Real
 | F | Glaucoma | Virtual | Virtual
", + "image_path": "f63e1c9d8dc06359caf5a4b12930ff0b0d5eaf774c06aa36bff6f66e42c719c4.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_body" + } + ], + "index": 10 + }, + { + "type": "table", + "bbox": [ + 332, + 228, + 541, + 310 + ], + "blocks": [ + { + "bbox": [ + 323, + 207, + 550, + 218 + ], + "lines": [ + { + "bbox": [ + 323, + 207, + 550, + 218 + ], + "spans": [ + { + "bbox": [ + 323, + 207, + 550, + 218 + ], + "type": "text", + "content": "Table 3: Smartphone Interaction Tasks in Experiment 1" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 332, + 228, + 541, + 310 + ], + "lines": [ + { + "bbox": [ + 332, + 228, + 541, + 310 + ], + "spans": [ + { + "bbox": [ + 332, + 228, + 541, + 310 + ], + "type": "table", + "html": "
Task Type | Task Code | Task Description
Everyday Phone Use | T1 | Perform a Calculation
 | T2 | Add an Alarm
 | T3 | Search, Save Image on Browser
Controlled Pointing | T4 | Number Search and Point
", + "image_path": "061f389c91dff21d715784e13d2bb92b0ce1df9f862751e39e580c598af0aa23.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "table_body" + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 326, + 559, + 358 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 326, + 559, + 358 + ], + "spans": [ + { + "bbox": [ + 313, + 326, + 559, + 358 + ], + "type": "text", + "content": "by our institution's IRB) consisted of 12 users (9 males) with no pre-existing uncorrected vision impairments. Users were aged 24-39, with a mean age of 30.3 years " + }, + { + "bbox": [ + 313, + 326, + 559, + 358 + ], + "type": "inline_equation", + "content": "(\\mathrm{SD} = 5)" + }, + { + "bbox": [ + 313, + 326, + 559, + 358 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 358, + 559, + 457 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 358, + 559, + 457 + ], + "spans": [ + { + "bbox": [ + 313, + 358, + 559, + 457 + ], + "type": "text", + "content": "Study Tasks and Measures. We adopted a repeated measures design, with participants counterbalanced for condition order (see Table 2 for the conditions). Participants were asked to perform four different tasks split into two task types; everyday phone use, and controlled pointing (see Table 3). Users were asked to perform all tasks using two-handed interaction, holding the phone at a distance that they normally would during daily use. We chose two-handed interaction to eliminate for phone balancing that is typical in one-handed interaction given the typical size of today's smartphones." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 457, + 559, + 643 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 457, + 559, + 643 + ], + "spans": [ + { + "bbox": [ + 313, + 457, + 559, + 643 + ], + "type": "text", + "content": "T1-T3 are everyday tasks users perform on a smartphone. They cover smartphone touch interaction of taps, swipes, and long press, on UI widgets such as keyboards, buttons and scrolling content. Users were asked to experience performing these tasks under six conditions, including under impairments (both using the physical hardware and the VR device). At the end of all three tasks (T1-T3), users completed the NASA-TLX[18] survey to indicate their perceived workload during task performance. T4, on the other hand, is a controlled pointing task experiment. Participants were given a stimulus number and then asked to click on the button with the corresponding number, as quickly and as precisely as they could. (See Figure 12 for a screenshot of the application used in this task.) Users repeated this task 80 times in succession, for each of the six conditions (A-F; see Table 2). We recorded the touch times and positions with the task app. We conducted a short semi-structured interview at the end of the study to understand users' experiences with, and perceptions of, the physical and virtual impairment simulations." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 643, + 559, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 643, + 559, + 710 + ], + "spans": [ + { + "bbox": [ + 313, + 643, + 559, + 710 + ], + "type": "text", + "content": "Instruments: We compared Empath-D with a commercial physical impairment simulator [13]. 
To calibrate for visual acuity, we adapted a test similar to a Snellen eye test chart [42] - showing rows of letters with each lower row having a smaller font size. We first used the physical impairment simulator to obtain the minimum acceptable font size. Using the same test page in the VR, we applied" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 57, + 282, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 57, + 282, + 69 + ], + "spans": [ + { + "bbox": [ + 52, + 57, + 282, + 69 + ], + "type": "text", + "content": "Empath-D: VR-based Empathetic App Design for Accessibility" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 374, + 57, + 559, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 374, + 57, + 559, + 69 + ], + "spans": [ + { + "bbox": [ + 374, + 57, + 559, + 69 + ], + "type": "text", + "content": "MobiSys '18, June 10-15, 2018, Munich, Germany" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 164, + 95, + 182, + 111 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 164, + 95, + 182, + 111 + ], + "spans": [ + { + "bbox": [ + 164, + 95, + 182, + 111 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 126, + 140, + 220, + 253 + ], + "blocks": [ + { + "bbox": [ + 126, + 140, + 220, + 253 + ], + "lines": [ + { + "bbox": [ + 126, + 140, + 220, + 253 + ], + "spans": [ + { + "bbox": [ + 126, + 140, + 220, + 253 + ], + "type": "table", + "html": "
27 | 16 | 13
25 | 18 | 15
23 | 20 | 26
12 | 14 | 17
21 | 11 | 19
22 | 10 | 24
", + "image_path": "8638bc94a87df3220c56eaf20424b1d9b0ac79c22a95c4d941e476120acb7b7a.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 50, + 265, + 295, + 286 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 265, + 295, + 286 + ], + "spans": [ + { + "bbox": [ + 50, + 265, + 295, + 286 + ], + "type": "text", + "content": "Figure 12: Screenshot of a test application for the pointing task" + } + ] + } + ], + "index": 4, + "type": "text" + }, + { + "bbox": [ + 50, + 318, + 295, + 438 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 318, + 295, + 438 + ], + "spans": [ + { + "bbox": [ + 50, + 318, + 295, + 438 + ], + "type": "text", + "content": "the impairment and gradually adjusted the severity until we hit the minimum acceptable font size. To calibrate the inner circle of clarity for glaucoma, we implemented an app that allows us to adjust the diameter of a coloured circle. We then used the physical impairment simulator for glaucoma, and adjusted the coloured circle to the point in which the circle reaches the fringe for clarity. We then calibrated the virtual glaucoma simulation in a similar manner. Three independent measurements for visual acuity and circle of clarity were taken from the research team and averaged to determine the final calibration parameters of font size " + }, + { + "bbox": [ + 50, + 318, + 295, + 438 + ], + "type": "inline_equation", + "content": "= 12" + }, + { + "bbox": [ + 50, + 318, + 295, + 438 + ], + "type": "text", + "content": " sp and diameter " + }, + { + "bbox": [ + 50, + 318, + 295, + 438 + ], + "type": "inline_equation", + "content": "= 60" + }, + { + "bbox": [ + 50, + 318, + 295, + 438 + ], + "type": "text", + "content": " mm." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 51, + 452, + 253, + 464 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 452, + 253, + 464 + ], + "spans": [ + { + "bbox": [ + 51, + 452, + 253, + 464 + ], + "type": "text", + "content": "7.3 Empath-D vs. Physical Smartphone" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 50, + 466, + 295, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 466, + 295, + 510 + ], + "spans": [ + { + "bbox": [ + 50, + 466, + 295, + 510 + ], + "type": "text", + "content": "We first investigate whether the VR-based interaction is a sufficiently faithful replica of the real-world interaction that a user would have with a regular smartphone, in the absence of any impairments." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 50, + 510, + 295, + 642 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 510, + 295, + 642 + ], + "spans": [ + { + "bbox": [ + 50, + 510, + 295, + 642 + ], + "type": "text", + "content": "Touch Accuracy: In all six conditions, users were able to achieve high levels of button touch accuracy (see Table 4), with the accuracy being " + }, + { + "bbox": [ + 50, + 510, + 295, + 642 + ], + "type": "inline_equation", + "content": "98.8\\%" + }, + { + "bbox": [ + 50, + 510, + 295, + 642 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 50, + 510, + 295, + 642 + ], + "type": "inline_equation", + "content": "SD = 1.67" + }, + { + "bbox": [ + 50, + 510, + 295, + 642 + ], + "type": "text", + "content": ") when the users interacted unimpaired with the VR device. 
Comparing the accuracies between the physical smartphone and the VR device, we noted that the VR condition had an accuracy of " + }, + { + "bbox": [ + 50, + 510, + 295, + 642 + ], + "type": "inline_equation", + "content": "99.12\\%" + }, + { + "bbox": [ + 50, + 510, + 295, + 642 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 50, + 510, + 295, + 642 + ], + "type": "inline_equation", + "content": "SD = 1.32" + }, + { + "bbox": [ + 50, + 510, + 295, + 642 + ], + "type": "text", + "content": ") (across all 6 conditions), whereas the use of the physical smartphone provided " + }, + { + "bbox": [ + 50, + 510, + 295, + 642 + ], + "type": "inline_equation", + "content": "100\\%" + }, + { + "bbox": [ + 50, + 510, + 295, + 642 + ], + "type": "text", + "content": " accuracy. In terms of the location accuracy, we noted a difference of " + }, + { + "bbox": [ + 50, + 510, + 295, + 642 + ], + "type": "inline_equation", + "content": "2.28 \\, \\text{mm}" + }, + { + "bbox": [ + 50, + 510, + 295, + 642 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 50, + 510, + 295, + 642 + ], + "type": "inline_equation", + "content": "SD = 2.98" + }, + { + "bbox": [ + 50, + 510, + 295, + 642 + ], + "type": "text", + "content": ") between the use of Empath-D vs. a physical smartphone. This difference is well within the uncertainty associated with finger touch interactions, and thus demonstrates that user performance was equivalent across both Empath-D and a physical smartphone." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 50, + 642, + 295, + 708 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 642, + 295, + 708 + ], + "spans": [ + { + "bbox": [ + 50, + 642, + 295, + 708 + ], + "type": "text", + "content": "Perceived Workload: NASA-TLX scores indicated that the users did perceive significant differences in their workload using Empath-D, compared to use of the physical smartphone (" + }, + { + "bbox": [ + 50, + 642, + 295, + 708 + ], + "type": "inline_equation", + "content": "Z = 2.824" + }, + { + "bbox": [ + 50, + 642, + 295, + 708 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 50, + 642, + 295, + 708 + ], + "type": "inline_equation", + "content": "p = 0.005 < 0.05" + }, + { + "bbox": [ + 50, + 642, + 295, + 708 + ], + "type": "text", + "content": "). This does suggest that the navigating an app within the VR device does require greater cognitive effort than simply interacting with a regular smartphone. However, it is difficult to" + } + ] + } + ], + "index": 9 + }, + { + "type": "table", + "bbox": [ + 337, + 105, + 538, + 186 + ], + "blocks": [ + { + "bbox": [ + 331, + 83, + 542, + 94 + ], + "lines": [ + { + "bbox": [ + 331, + 83, + 542, + 94 + ], + "spans": [ + { + "bbox": [ + 331, + 83, + 542, + 94 + ], + "type": "text", + "content": "Table 4: Accuracy of Button Touch Across All Users" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 337, + 105, + 538, + 186 + ], + "lines": [ + { + "bbox": [ + 337, + 105, + 538, + 186 + ], + "spans": [ + { + "bbox": [ + 337, + 105, + 538, + 186 + ], + "type": "table", + "html": "
Impairment | Environment | Accuracy (SD) %
None | Physical | 100
 | Virtual | 98.79 (1.67)
Cataracts | Physical | 100
 | Virtual | 99.09 (1.36)
Glaucoma | Physical | 100
 | Virtual | 99.49 (0.82)
", + "image_path": "2f232bd4a97c3fdf7f35dadee020a4b313e917af6e86f761c541d5c89d75b5f5.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 213, + 559, + 235 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 213, + 559, + 235 + ], + "spans": [ + { + "bbox": [ + 313, + 213, + 559, + 235 + ], + "type": "text", + "content": "decipher whether this difference is due to Empath-D-specific issues, or a general lack of familiarity with VR devices." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 236, + 559, + 345 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 236, + 559, + 345 + ], + "spans": [ + { + "bbox": [ + 313, + 236, + 559, + 345 + ], + "type": "text", + "content": "We additionally investigated the subjective feedback captured by the semi-structured interview. " + }, + { + "bbox": [ + 313, + 236, + 559, + 345 + ], + "type": "inline_equation", + "content": "83\\%" + }, + { + "bbox": [ + 313, + 236, + 559, + 345 + ], + "type": "text", + "content": " (10) of the users reported perceiving increased latency while using Empath-D, while 2 users indicated that they felt no noticeable latency difference. However, all 12 users indicated that the performance of Empath-D was \"acceptable\", and they would be able to use the Empath-D system for testing the usability of apps, as long as the apps do not require extremely low-latency interactions. (3 users indicated that the system might not be usable for testing real-time games.)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 314, + 357, + 524, + 381 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 357, + 524, + 381 + ], + "spans": [ + { + "bbox": [ + 314, + 357, + 524, + 381 + ], + "type": "text", + "content": "7.4 Empath-D vs. Hardware Impairment Simulators" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 384, + 559, + 440 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 384, + 559, + 440 + ], + "spans": [ + { + "bbox": [ + 313, + 384, + 559, + 440 + ], + "type": "text", + "content": "We now study the performance of Empath-D vis-a-vis impairments generated using commercially available hardware. Figure 11 shows the overhead of Empath-D under impairment conditions, demonstrating that Empath-D is able to operate without significant performance loss even in the presence of impairments." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 440, + 559, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 440, + 559, + 604 + ], + "spans": [ + { + "bbox": [ + 313, + 440, + 559, + 604 + ], + "type": "text", + "content": "Touch Accuracy: Table 4 enumerates the accuracy for the pointing task (T4) for two distinct impairments (Cataract & Glaucoma), for both the VR-based Empath-D system and the hardware impairment simulator. We see that, in the Cataract condition, Empath-D had a mean accuracy of " + }, + { + "bbox": [ + 313, + 440, + 559, + 604 + ], + "type": "inline_equation", + "content": "99.09\\%" + }, + { + "bbox": [ + 313, + 440, + 559, + 604 + ], + "type": "text", + "content": ", which is virtually indistinguishable from that of the hardware device (" + }, + { + "bbox": [ + 313, + 440, + 559, + 604 + ], + "type": "inline_equation", + "content": "100\\%" + }, + { + "bbox": [ + 313, + 440, + 559, + 604 + ], + "type": "text", + "content": "). 
A similar pattern was observed for the Glaucoma impairment (" + }, + { + "bbox": [ + 313, + 440, + 559, + 604 + ], + "type": "inline_equation", + "content": "99.49\\%" + }, + { + "bbox": [ + 313, + 440, + 559, + 604 + ], + "type": "text", + "content": " for Empath-D vs. " + }, + { + "bbox": [ + 313, + 440, + 559, + 604 + ], + "type": "inline_equation", + "content": "100\\%" + }, + { + "bbox": [ + 313, + 440, + 559, + 604 + ], + "type": "text", + "content": " for Hardware). In terms of the location accuracy, we noted a difference of " + }, + { + "bbox": [ + 313, + 440, + 559, + 604 + ], + "type": "inline_equation", + "content": "1.7 \\, \\text{mm}" + }, + { + "bbox": [ + 313, + 440, + 559, + 604 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 313, + 440, + 559, + 604 + ], + "type": "inline_equation", + "content": "SD = 1.9" + }, + { + "bbox": [ + 313, + 440, + 559, + 604 + ], + "type": "text", + "content": ") (for Cataract) and " + }, + { + "bbox": [ + 313, + 440, + 559, + 604 + ], + "type": "inline_equation", + "content": "1.2 \\, \\text{mm}" + }, + { + "bbox": [ + 313, + 440, + 559, + 604 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 313, + 440, + 559, + 604 + ], + "type": "inline_equation", + "content": "SD = 1.6" + }, + { + "bbox": [ + 313, + 440, + 559, + 604 + ], + "type": "text", + "content": ") (for Glaucoma) between the use of Empath-D vs. the impairment hardware. Once again, this difference is well within the uncertainty associated with finger touch interactions. These results provide strong evidence that Empath-D is able to emulate impairment conditions that are equivalent to that of dedicated, commercial hardware." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 604, + 559, + 648 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 604, + 559, + 648 + ], + "spans": [ + { + "bbox": [ + 313, + 604, + 559, + 648 + ], + "type": "text", + "content": "Perceived Workload: The numerical TLX scores indicated that there was no significant difference for Cataracts; however, the difference for Glaucoma was significant " + }, + { + "bbox": [ + 313, + 604, + 559, + 648 + ], + "type": "inline_equation", + "content": "(Z = 3.061" + }, + { + "bbox": [ + 313, + 604, + 559, + 648 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 313, + 604, + 559, + 648 + ], + "type": "inline_equation", + "content": "p = 0.002 < 0.05)" + }, + { + "bbox": [ + 313, + 604, + 559, + 648 + ], + "type": "text", + "content": " with users indicating a higher perceived workload for the VR device." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 314, + 661, + 423, + 671 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 661, + 423, + 671 + ], + "spans": [ + { + "bbox": [ + 314, + 661, + 423, + 671 + ], + "type": "text", + "content": "7.5 Motion sickness" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 313, + 675, + 559, + 708 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 675, + 559, + 708 + ], + "spans": [ + { + "bbox": [ + 313, + 675, + 559, + 708 + ], + "type": "text", + "content": "At the end of the user study, we asked each participant if they felt discomfort or unwell. Only two of the twelve participants reported slight motion sickness while using Empath-D. 
Motion sickness may" + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 51, + 57, + 236, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 57, + 236, + 69 + ], + "spans": [ + { + "bbox": [ + 51, + 57, + 236, + 69 + ], + "type": "text", + "content": "MobiSys '18, June 10-15, 2018, Munich, Germany" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 484, + 57, + 559, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 484, + 57, + 559, + 68 + ], + "spans": [ + { + "bbox": [ + 484, + 57, + 559, + 68 + ], + "type": "text", + "content": "Wonjung Kim et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 51, + 85, + 294, + 107 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 85, + 294, + 107 + ], + "spans": [ + { + "bbox": [ + 51, + 85, + 294, + 107 + ], + "type": "text", + "content": "arise from: (1) the use of the VR display itself, and (2) the latency from Empath-D. However, it is difficult to separate the two." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 50, + 107, + 294, + 173 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 107, + 294, + 173 + ], + "spans": [ + { + "bbox": [ + 50, + 107, + 294, + 173 + ], + "type": "text", + "content": "The effects of motion sickness are notably minor in our current prototype of Empath-D. The nature of our experimentation intensifies the use of the VR display, whereas practical use of Empath-D is likely to be more interspersed between app redesigns. We further discuss how we may improve on latency in Section 9.2 to reduce motion sickness that may result from the latency of Empath-D." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 51, + 195, + 157, + 205 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 195, + 157, + 205 + ], + "spans": [ + { + "bbox": [ + 51, + 195, + 157, + 205 + ], + "type": "text", + "content": "8 RELATED WORK" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 209, + 295, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 209, + 295, + 384 + ], + "spans": [ + { + "bbox": [ + 50, + 209, + 295, + 384 + ], + "type": "text", + "content": "Designing for Inclusiveness. Newell et al. [31] pointed out that traditional user-centred design techniques provide little guidance for designing interfaces for elderly and disabled users due to the large variation amongst the type and degree of impairments. They also highlighted that the standard guidelines for designing disabled-friendly UIs are too general [30] and lacked empathy for users. For instance the WCAG 2.0 lists that the use of colour \"is not used as the only visual means of conveying information, indicating an action, prompting a response or distinguishing a visual element\". This requires interpretation by the designer into specific designs in his application. Over the years, various accessibility design guidelines (such as WCAG 2.0 [3], IBM Accessibility Checklist [38], US Section 508 Standards [2]) and tools (aChecker [15]) have been proposed and refined. However, the problems pointed out by Newell are remained unsolved to a large extent, which hinders elaborate design for a target user group with a specific impairment." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 385, + 295, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 385, + 295, + 495 + ], + "spans": [ + { + "bbox": [ + 50, + 385, + 295, + 495 + ], + "type": "text", + "content": "Simulated Design. There exists prior work on helping UI designers design better interfaces for people suffering from vision impairments. Higuchi et al. [19] proposed a tool to simulate the visual capabilities of the elderly for the design of control panels, while Mankoff et al. [26] developed a tool to simulate a user with visual and motor impairments on the desktop screen. SIMVIZ [9, 47] uses the Oculus Rift VR device to simulate visual impairments to examine reading text on a smartphone. For audio modalities, Werfel et al. [47] simulated hearing ailments by using a pair of microphones with equalised headphones." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 50, + 495, + 295, + 560 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 495, + 295, + 560 + ], + "spans": [ + { + "bbox": [ + 50, + 495, + 295, + 560 + ], + "type": "text", + "content": "Different from prior works, Empath-D uses VR as the medium for immersive evaluation to 1) flexibly support wider groups of impaired users, and 2) allow naturalistic interactions with a mobile phone in a virtual environment. This novel approach supports ecological validity in testing applications and is key for mobile apps which go beyond the static settings of previous work." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 50, + 560, + 295, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 560, + 295, + 604 + ], + "spans": [ + { + "bbox": [ + 50, + 560, + 295, + 604 + ], + "type": "text", + "content": "While previous work has focused on simulation in single modality (visual or auditory), Empath-D is able to flexibly combine modalities to support any application type, ailment (visual, auditory, motor) and usage environment." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 50, + 604, + 295, + 703 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 604, + 295, + 703 + ], + "spans": [ + { + "bbox": [ + 50, + 604, + 295, + 703 + ], + "type": "text", + "content": "System Support for Accessibility. Modern mobile OSes provide accessibility support; in particular, it allows users with far-sightedness to increase fonts and users with blindness to interact through vocal interfaces. Also, Zhong et al. enhanced Android accessibility for users with hand tremor by reducing fine pointing and steady tapping [48]. We believe Empath-D will significantly expand basic accessibility support of commodity devices and accelerates the design and deployment of various accessibility add-ons for different impaired users." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 85, + 559, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 85, + 559, + 205 + ], + "spans": [ + { + "bbox": [ + 313, + 85, + 559, + 205 + ], + "type": "text", + "content": "Testing of Mobile Applications. Recently there have been many systems, such as VanarSena [37], AMC [22], Puma [17], DynoDroid [25], DECAF [24], AppsPlayground [36], for automatically testing and identifying various types of UI and systems bugs in mobile applications. Empath-D takes a different approach in that we do not detect bugs after the application is developed and deployed. 
Instead, we allow the designer to test early iterations of the designs rapidly. In this way, we hope to reduce the pain of having to make significant UI changes at the end of the design cycle - or worse, end with an application that cannot be used effectively by the target impaired demographic." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 314, + 219, + 399, + 230 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 219, + 399, + 230 + ], + "spans": [ + { + "bbox": [ + 314, + 219, + 399, + 230 + ], + "type": "text", + "content": "9 DISCUSSION" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 233, + 559, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 233, + 559, + 277 + ], + "spans": [ + { + "bbox": [ + 313, + 233, + 559, + 277 + ], + "type": "text", + "content": "Our current studies indicate the considerable promise of Empath-D, as a mechanism for rapid and empathetic evaluation of app usability. We now discuss some additional studies and issues that we intend to explore further." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 314, + 292, + 471, + 304 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 292, + 471, + 304 + ], + "spans": [ + { + "bbox": [ + 314, + 292, + 471, + 304 + ], + "type": "text", + "content": "9.1 User study with Designers" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 306, + 559, + 481 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 306, + 559, + 481 + ], + "spans": [ + { + "bbox": [ + 313, + 306, + 559, + 481 + ], + "type": "text", + "content": "We conducted a short user study with two mobile app developers to qualitatively examine Empath-D in actual use. Both developers have previously worked to create an Android mobile application, which was used as the baseline for the study. The developers were tasked with redesigning the mobile app for the glaucoma-impaired under two conditions: 1) without Empath-D, but with materials describing glaucoma and showing functionally accurate examples of glaucoma, and 2) with the same materials, and Empath-D. Both developers agreed that Empath-D helped them improve their designs over the baseline condition. The developers reported that Empath-D allowed them to improve their designs in two ways: 1) they can focus their attention on re-designing particular problematic parts of the UI, and 2) they are able to appropriately calibrate their modifications (for instance increasing the font size may help, but text that is too large will also cause glaucoma sufferers to visually scan more, causing fatigue)." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 314, + 495, + 481, + 508 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 495, + 481, + 508 + ], + "spans": [ + { + "bbox": [ + 314, + 495, + 481, + 508 + ], + "type": "text", + "content": "9.2 Dealing with Latency Issues" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 510, + 559, + 652 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 510, + 559, + 652 + ], + "spans": [ + { + "bbox": [ + 313, + 510, + 559, + 652 + ], + "type": "text", + "content": "Our experimental studies indicate that users are able to utilise Empath-D effectively for \"conventional\" apps—i.e., those that typically involve sporadic interaction by users with UI elements, such as buttons and keyboards. 
The current end-to-end latency (of " + }, + { + "bbox": [ + 313, + 510, + 559, + 652 + ], + "type": "inline_equation", + "content": "\\approx" + }, + { + "bbox": [ + 313, + 510, + 559, + 652 + ], + "type": "text", + "content": " 200 msec) is not an impediment for high-fidelity evaluation of such apps. However, the participants also indicated that this latency (lag between user actions and rendering in the VR display) would pose a problem for highly latency-sensitive applications, such as games. At present, it is thus appropriate to state that Empath-D potentially needs additional optimisations to support such applications. The most obvious improvement would be to replace the default Android emulator with a faster, custom emulation engine—this is likely to reduce " + }, + { + "bbox": [ + 313, + 510, + 559, + 652 + ], + "type": "inline_equation", + "content": "\\approx" + }, + { + "bbox": [ + 313, + 510, + 559, + 652 + ], + "type": "text", + "content": " 100 msec of the delay budget." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 653, + 559, + 708 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 653, + 559, + 708 + ], + "spans": [ + { + "bbox": [ + 313, + 653, + 559, + 708 + ], + "type": "text", + "content": "The current implementation streams JPEG images (hand, emulator's screen) from the intermediary computer to the VR smartphone. We plan to adopt a low-latency video streaming codec such as H.265 HEVC [43], which would help reduce networking and rendering latency. OS-level optimisations (e.g., preemptive priority" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 58, + 282, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 58, + 282, + 69 + ], + "spans": [ + { + "bbox": [ + 52, + 58, + 282, + 69 + ], + "type": "text", + "content": "Empath-D: VR-based Empathetic App Design for Accessibility" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 374, + 58, + 558, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 374, + 58, + 558, + 69 + ], + "spans": [ + { + "bbox": [ + 374, + 58, + 558, + 69 + ], + "type": "text", + "content": "MobiSys '18, June 10-15, 2018, Munich, Germany" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 50, + 85, + 294, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 85, + 294, + 106 + ], + "spans": [ + { + "bbox": [ + 50, + 85, + 294, + 106 + ], + "type": "text", + "content": "for inter-component messages) may be needed to support even lower latency." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 50, + 106, + 294, + 152 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 106, + 294, + 152 + ], + "spans": [ + { + "bbox": [ + 50, + 106, + 294, + 152 + ], + "type": "text", + "content": "Recently, several works have proposed techniques for achieving high-quality VR experience on mobile devices [7, 10, 21]. Empath- " + }, + { + "bbox": [ + 50, + 106, + 294, + 152 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 50, + 106, + 294, + 152 + ], + "type": "text", + "content": " could borrow some techniques to improve latency and video quality." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 51, + 161, + 253, + 172 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 161, + 253, + 172 + ], + "spans": [ + { + "bbox": [ + 51, + 161, + 253, + 172 + ], + "type": "text", + "content": "9.3 User Performance with VR Devices" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 175, + 295, + 317 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 175, + 295, + 317 + ], + "spans": [ + { + "bbox": [ + 50, + 175, + 295, + 317 + ], + "type": "text", + "content": "Moreover, our user studies also indicated that the time for performing tasks (T1-T4) was marginally higher when using the VR environment, compared to the direct use of a real-world smartphone. More specifically, for the pointing task T4, there was an average difference of 654 msec in the task completion time using Empath-D, compared to the smartphone. In addition, anecdotal comments suggest that continued use of the VR device, for longer-lived sessions, might pose additional usability challenges. For example, a couple of users indicated some minor muscle fatigue, most likely as a result of using a 'heavy' VR device. It is an open question whether these issues will be mitigated over time, as VR devices become lighter and more ergonomic, and as users have greater familiarity with the use of VR devices." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 51, + 329, + 220, + 341 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 329, + 220, + 341 + ], + "spans": [ + { + "bbox": [ + 51, + 329, + 220, + 341 + ], + "type": "text", + "content": "9.4 Advanced Uses of Empath-D" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 50, + 343, + 295, + 519 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 343, + 295, + 519 + ], + "spans": [ + { + "bbox": [ + 50, + 343, + 295, + 519 + ], + "type": "text", + "content": "Our current implementation of Empath-D supports the virtualisation of certain output modalities (specifically the display and audio) of the emulated app. The vision of Empath-D can be extended to create other richer interaction modes, often blending virtual and augmented reality (AR) settings. As an example, certain emulation conditions may need to generate and integrate synthetic sensor traces, to replace the real sensor traces from the smartphone-e.g., to mimic the user's movement in locations, such as forests and mountains, the phone's real GPS trace would need to be replaced by a synthetic GPS trace as in [27, 28]. Similarly, in some cases, the app itself might need to take inputs from the VR world-e.g., if the app was being used to magnify certain objects embedded in the VR world. While such use cases can be supported, they will require enhancements to the current Empath-D framework, and it is likely that the implementation may surface additional challenges, in terms of computational overhead and latency." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 51, + 529, + 293, + 542 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 529, + 293, + 542 + ], + "spans": [ + { + "bbox": [ + 51, + 529, + 293, + 542 + ], + "type": "text", + "content": "9.5 Developing Impairment Filters and Profiles" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 50, + 544, + 295, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 544, + 295, + 654 + ], + "spans": [ + { + "bbox": [ + 50, + 544, + 295, + 654 + ], + "type": "text", + "content": "To demonstrate the viability of Empath-D, we focused on demonstrating the ability to simulate visual impairments and in particular cataracts and glaucoma. As we explored, these impairments have functional aspects that are commonly employed to characterise them, such as visual acuity or contrast sensitivity, and are often accompanied by standard tests such as the Snellen eye test chart [42] and Pelli-Robson contrast sensitivity chart [34] respectively. From examining the commercial physical impairment simulator and our experimentation, we believe that Empath-D has the ability to functionally simulate other impairments." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 50, + 654, + 295, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 654, + 295, + 710 + ], + "spans": [ + { + "bbox": [ + 50, + 654, + 295, + 710 + ], + "type": "text", + "content": "We recognise two important directions that Empath-D needs address to improve impairment simulation and use. First, impairment filters have to be developed in concert with medical professionals who are subject matter experts in the areas of the specific pathologies. This helps to develop a library of impairment filters. Second," + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 85, + 559, + 173 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 85, + 559, + 173 + ], + "spans": [ + { + "bbox": [ + 313, + 85, + 559, + 173 + ], + "type": "text", + "content": "with verified impairment filters, we may create impairment profiles, which characterise groups of users with possibly overlapping requirements. For instance, a hypothetical impairment profile may calibrate for a demographic of a range of ages, sex, and percentage of the population who may have myopia and cataracts—both which affect visual acuity. With impairment profiles, app developers may easily select and understand the demographic to which they are designing for." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 315, + 182, + 411, + 193 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 182, + 411, + 193 + ], + "spans": [ + { + "bbox": [ + 315, + 182, + 411, + 193 + ], + "type": "text", + "content": "10 CONCLUSION" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 197, + 559, + 427 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 197, + 559, + 427 + ], + "spans": [ + { + "bbox": [ + 313, + 197, + 559, + 427 + ], + "type": "text", + "content": "We presented the design and evaluation of Empath-D, a framework that allows app developers to 'step into the shoes' of impaired users, and perform an empathetic evaluation of their app interfaces. Our key idea is to utilise a virtual world (using a commodity VR device) to present an impaired view of the app's interface, while allowing the user to interact naturally with a real commodity smartphone in the physical world. 
Overcoming the current computational limitations (of the VR device and the Android emulator) required us to make careful system choices, such as (i) appropriate tradeoffs between the resolution and frame rate for rendering the virtual smartphone, (ii) subsampling of the mesh representing the user's hand and (iii) scaling up the size of the virtual smartphone to overcome the lower resolution of the VR device. User studies show that Empath-D is effective in (a) providing usability that is equivalent to using a real app (on a real smartphone), for applications that do not require ultra-low latency and (b) emulating impairments in a similar fashion to custom hardware devices. We believe that Empath-D can be a powerful new paradigm for effective bidirectional integration between real-world user actions and virtual worlds, and that this can enable additional immersive applications beyond just 'impairment emulation'." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 315, + 437, + 460, + 449 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 437, + 460, + 449 + ], + "spans": [ + { + "bbox": [ + 315, + 437, + 460, + 449 + ], + "type": "text", + "content": "11 ACKNOWLEDGEMENT" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 453, + 559, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 453, + 559, + 540 + ], + "spans": [ + { + "bbox": [ + 313, + 453, + 559, + 540 + ], + "type": "text", + "content": "We are thankful to our shepherd Prof. Xia Zhou and all anonymous reviewers for their valuable reviews. This research is supported partially by Singapore Ministry of Education Academic Research Fund Tier 2 under research grant MOE2014-T2-1063, and by the National Research Foundation, Prime Minister's Office, Singapore under its IDM Futures Funding Initiative. All findings and recommendations are those of the authors and do not necessarily reflect the views of the granting agency, or SMU." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 316, + 551, + 388, + 561 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 551, + 388, + 561 + ], + "spans": [ + { + "bbox": [ + 316, + 551, + 388, + 561 + ], + "type": "text", + "content": "REFERENCES" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 319, + 564, + 559, + 708 + ], + "type": "list", + "angle": 0, + "index": 24, + "blocks": [ + { + "bbox": [ + 319, + 564, + 559, + 581 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 319, + 564, + 559, + 581 + ], + "spans": [ + { + "bbox": [ + 319, + 564, + 559, + 581 + ], + "type": "text", + "content": "[1] [n. d.]. AGNES (Age Gain Now Empathy Systems. ([n. d.]). Retrieved 2018-04-13 from http://agelab.mit.edu/agnes-age-gain-now-empathy-system" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 319, + 581, + 559, + 596 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 319, + 581, + 559, + 596 + ], + "spans": [ + { + "bbox": [ + 319, + 581, + 559, + 596 + ], + "type": "text", + "content": "[2] [n. d]. US Section 508 Standards. ([n. d]). Retrieved 2018-04-13 from https: //www.section508.gov/" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 319, + 597, + 559, + 613 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 319, + 597, + 559, + 613 + ], + "spans": [ + { + "bbox": [ + 319, + 597, + 559, + 613 + ], + "type": "text", + "content": "[3] 2008. Web Content Accessibility Guidelines (WCAG) 2.0. (11 December 2008). 
Retrieved 2018-04-13 from https://www.w3.org/TR/WCAG20/" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 319, + 613, + 559, + 628 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 319, + 613, + 559, + 628 + ], + "spans": [ + { + "bbox": [ + 319, + 613, + 559, + 628 + ], + "type": "text", + "content": "[4] 2016. Samsung Galaxy S7 Specifications. (2016). Retrieved 2018-04-13 from http://www.samsung.com/global/galaxy/galaxy-s7/#!/spec" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 319, + 628, + 559, + 644 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 319, + 628, + 559, + 644 + ], + "spans": [ + { + "bbox": [ + 319, + 628, + 559, + 644 + ], + "type": "text", + "content": "[5] 2017. Samsung Gear VR Specifications. (2017). Retrieved 2018-04-13 from http://www.samsung.com/global/galaxy/gear-vr/specs/" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 319, + 644, + 559, + 669 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 319, + 644, + 559, + 669 + ], + "spans": [ + { + "bbox": [ + 319, + 644, + 559, + 669 + ], + "type": "text", + "content": "[6] 2018. SolvePnP, Camera Calibration and 3D Reconstruction, OpenCV. (2018). Retrieved 2018-04-13 from https://docs.opencv.org/2.4/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 319, + 669, + 559, + 708 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 319, + 669, + 559, + 708 + ], + "spans": [ + { + "bbox": [ + 319, + 669, + 559, + 708 + ], + "type": "text", + "content": "[7] Omid Abari, Dinesh Bharadia, Austin Duffield, and Dina Katabi. 2017. Enabling High-Quality Untethered Virtual Reality. In 14th USENIX Symposium on Networked Systems Design and Implementation (NSDI 17). USENIX Association, Boston, MA, 531-544. https://www.usenix.org/conference/nsdi17/technical-sessions/presentation/abari" + } + ] + } + ], + "index": 23 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 51, + 57, + 237, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 57, + 237, + 69 + ], + "spans": [ + { + "bbox": [ + 51, + 57, + 237, + 69 + ], + "type": "text", + "content": "MobiSys '18, June 10-15, 2018, Munich, Germany" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 485, + 57, + 559, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 485, + 57, + 559, + 68 + ], + "spans": [ + { + "bbox": [ + 485, + 57, + 559, + 68 + ], + "type": "text", + "content": "Wonjung Kim et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 53, + 86, + 296, + 702 + ], + "type": "list", + "angle": 0, + "index": 21, + "blocks": [ + { + "bbox": [ + 55, + 86, + 296, + 126 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 86, + 296, + 126 + ], + "spans": [ + { + "bbox": [ + 55, + 86, + 296, + 126 + ], + "type": "text", + "content": "[8] Ardalan Amiri Sani, Kevin Boos, Min Hong Yun, and Lin Zhong. 2014. Rio: A System Solution for Sharing I/O Between Mobile Systems. In Proceedings of the 12th Annual International Conference on Mobile Systems, Applications, and Services (MobiSys '14). ACM, New York, NY, USA, 259-272. 
https://doi.org/10.1145/2594368.2594370" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 126, + 296, + 167 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 126, + 296, + 167 + ], + "spans": [ + { + "bbox": [ + 55, + 126, + 296, + 167 + ], + "type": "text", + "content": "[9] Halim Cagri Ates, Alexander Fiannaca, and Eelke Folmer. 2015. Immersive Simulation of Visual Impairments Using a Wearable See-through Display. In Proceedings of the Ninth International Conference on Tangible, Embedded, and Embodied Interaction (TEI '15). ACM, New York, NY, USA, 225-228. https://doi.org/10.1145/2677199.2680551" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 167, + 294, + 206 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 167, + 294, + 206 + ], + "spans": [ + { + "bbox": [ + 53, + 167, + 294, + 206 + ], + "type": "text", + "content": "[10] Kevin Boos, David Chu, and Eduardo Cuervo. 2016. FlashBack: Immersive Virtual Reality on Mobile Devices via Rendering Memozoation. In Proceedings of the 14th Annual International Conference on Mobile Systems, Applications, and Services (MobiSys '16). ACM, New York, NY, USA, 291-304. https://doi.org/10.1145/2906388.2906418" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 206, + 294, + 247 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 206, + 294, + 247 + ], + "spans": [ + { + "bbox": [ + 53, + 206, + 294, + 247 + ], + "type": "text", + "content": "[11] Kenny Tsu Wei Choo, Rajesh Krishna Balan, Tan Kiat Wee, Jagmohan Chauhan, Archan Misra, and Youngki Lee. 2017. Empath-D: Empathetic Design for Accessibility. In Proceedings of the 18th International Workshop on Mobile Computing Systems and Applications (HotMobile '17). ACM, New York, NY, USA, 55-60. https://doi.org/10.1145/3032970.3032981" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 247, + 294, + 286 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 247, + 294, + 286 + ], + "spans": [ + { + "bbox": [ + 53, + 247, + 294, + 286 + ], + "type": "text", + "content": "[12] Eduardo Cuervo, Alec Wolman, Landon P. Cox, Kiron Lebeck, Ali Razeen, Stefan Saroiu, and Madanlal Musuvathi. 2015. Kahawai: High-Quality Mobile Gaming Using GPU Offload. In Proceedings of the 13th Annual International Conference on Mobile Systems, Applications, and Services (MobiSys '15). ACM, New York, NY, USA, 121-135. https://doi.org/10.1145/2742647.2742657" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 286, + 294, + 302 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 286, + 294, + 302 + ], + "spans": [ + { + "bbox": [ + 53, + 286, + 294, + 302 + ], + "type": "text", + "content": "[13] Marshall Flax. 2018. Low Vision Simulators. (2018). Retrieved 2018-04-13 from https://www.lowvisionsimulators.com/" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 302, + 294, + 334 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 302, + 294, + 334 + ], + "spans": [ + { + "bbox": [ + 53, + 302, + 294, + 334 + ], + "type": "text", + "content": "[14] S. Garrido-Jurado, R. Mu noz Salinas, F.J. Madrid-Cuevas, and M.J. Marin-Jiménez. 2014. Automatic generation and detection of highly reliable fiducial markers under occlusion. Pattern Recognition 47, 6 (2014), 2280-2292. 
https://doi.org/10.1016/j.patcog.2014.01.005" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 334, + 294, + 366 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 334, + 294, + 366 + ], + "spans": [ + { + "bbox": [ + 53, + 334, + 294, + 366 + ], + "type": "text", + "content": "[15] Greg Gay and Cindy Qi Li. 2010. AChecker: Open, Interactive, Customizable, Web Accessibility Checking. In Proceedings of the 2010 International Cross Disciplinary Conference on Web Accessibility (W4A) (W4A '10). ACM, New York, NY, USA, Article 23, 2 pages. https://doi.org/10.1145/1805986.1806019" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 53, + 366, + 294, + 381 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 366, + 294, + 381 + ], + "spans": [ + { + "bbox": [ + 53, + 366, + 294, + 381 + ], + "type": "text", + "content": "[16] Genymotion. [n. d.]. Genymotion Android Emulator. ([n. d.]). Retrieved 2018-04-13 from https://www.genymotion.com/" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 53, + 381, + 294, + 422 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 381, + 294, + 422 + ], + "spans": [ + { + "bbox": [ + 53, + 381, + 294, + 422 + ], + "type": "text", + "content": "[17] Shuai Hao, Bin Liu, Suman Nath, William G.J. Halfond, and Ramesh Govindan. 2014. PUMA: Programmable UI-automation for Large-scale Dynamic Analysis of Mobile Apps. In Proceedings of the 12th Annual International Conference on Mobile Systems, Applications, and Services (MobiSys '14). ACM, New York, NY, USA, 204-217. https://doi.org/10.1145/2594368.2594390" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 53, + 422, + 294, + 445 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 422, + 294, + 445 + ], + "spans": [ + { + "bbox": [ + 53, + 422, + 294, + 445 + ], + "type": "text", + "content": "[18] Sandra G Hart and Lowell E Staveland. 1988. Development of NASA-TLX (Task Load Index): Results of empirical and theoretical research. In Advances in psychology. Vol. 52. Elsevier, 139-183." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 53, + 445, + 294, + 478 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 445, + 294, + 478 + ], + "spans": [ + { + "bbox": [ + 53, + 445, + 294, + 478 + ], + "type": "text", + "content": "[19] Kazunori Higuchi, Yasuo Sakaguchi, Kazuhiko Sugiyama, and Tomoaki Nakano. 1999. Simulating the human vision of elderly for designing control panels. In Systems, Man, and Cybernetics, 1999. IEEE SMC'99 Conference Proceedings. 1999 IEEE International Conference on, Vol. 5. IEEE, 703-708." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 53, + 478, + 294, + 502 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 478, + 294, + 502 + ], + "spans": [ + { + "bbox": [ + 53, + 478, + 294, + 502 + ], + "type": "text", + "content": "[20] Intel. 2016. Intel®RealSense™ Camera SR300 Product Specifications. (2016). Retrieved 2018-04-13 from https://ark.intel.com/products/92329/Intel-RealSense-Camera-SR300" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 53, + 502, + 294, + 542 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 502, + 294, + 542 + ], + "spans": [ + { + "bbox": [ + 53, + 502, + 294, + 542 + ], + "type": "text", + "content": "[21] Zeci Lai, Y. Charlie Hu, Yong Cui, Linhui Sun, and Ningwei Dai. 2017. Furion: Engineering High-Quality Immersive Virtual Reality on Today's Mobile Devices. 
In Proceedings of the 23rd Annual International Conference on Mobile Computing and Networking (MobiCom '17). ACM, New York, NY, USA, 409-421. https://doi.org/10.1145/3117811.3117815" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 53, + 542, + 294, + 582 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 542, + 294, + 582 + ], + "spans": [ + { + "bbox": [ + 53, + 542, + 294, + 582 + ], + "type": "text", + "content": "[22] Kyungmin Lee, Jason Flinn, T.J. Giuli, Brian Noble, and Christopher Peplin. 2013. AMC: Verifying User Interface Properties for Vehicular Applications. In Proceeding of the 11th Annual International Conference on Mobile Systems, Applications, and Services (MobiSys '13). ACM, New York, NY, USA, 1-12. https://doi.org/10.1145/2462456.2464459" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 53, + 582, + 294, + 606 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 582, + 294, + 606 + ], + "spans": [ + { + "bbox": [ + 53, + 582, + 294, + 606 + ], + "type": "text", + "content": "[23] Gordon E Legge, Sing-Hang Cheung, Deyue Yu, Susana TL Chung, Hye-Won Lee, and Daniel P Owens. 2007. The case for the visual span as a sensory bottleneck in reading. Journal of Vision 7, 2 (2007), 9-9." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 53, + 606, + 294, + 646 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 606, + 294, + 646 + ], + "spans": [ + { + "bbox": [ + 53, + 606, + 294, + 646 + ], + "type": "text", + "content": "[24] Bin Liu, Suman Nath, Ramesh Govindan, and Jie Liu. 2014. DECAF: Detecting and Characterizing Ad Fraud in Mobile Apps. In 11th USENIX Symposium on Networked Systems Design and Implementation (NSDI 14). USENIX Association, Seattle, WA, 57-70. https://www.usenix.org/conference/nsdi14/technical-sessions/presentation/liu_bin" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 53, + 646, + 294, + 678 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 646, + 294, + 678 + ], + "spans": [ + { + "bbox": [ + 53, + 646, + 294, + 678 + ], + "type": "text", + "content": "[25] Aravind Machiry, Rohan Tahiliani, and Mayur Naik. 2013. Dynodroid: An Input Generation System for Android Apps. In Proceedings of the 2013 9th Joint Meeting on Foundations of Software Engineering (ESEC/FSE 2013). ACM, New York, NY, USA, 224-234. https://doi.org/10.1145/2491411.2491450" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 53, + 678, + 294, + 702 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 678, + 294, + 702 + ], + "spans": [ + { + "bbox": [ + 53, + 678, + 294, + 702 + ], + "type": "text", + "content": "[26] Jennifer Mankoff, Holly Fait, and Ray Juang. 2005. Evaluating accessibility by simulating the experiences of users with vision or motor impairments. IBM Systems Journal 44, 3 (2005), 505-517." + } + ] + } + ], + "index": 20 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 86, + 559, + 709 + ], + "type": "list", + "angle": 0, + "index": 45, + "blocks": [ + { + "bbox": [ + 316, + 86, + 559, + 134 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 86, + 559, + 134 + ], + "spans": [ + { + "bbox": [ + 316, + 86, + 559, + 134 + ], + "type": "text", + "content": "[27] Chulhong Min, Seungchul Lee, Changhun Lee, Youngki Lee, Seungwoo Kang, Seungpyo Choi, Wonjung Kim, and Junehwa Song. 2016. PADA: Power-aware Development Assistant for Mobile Sensing Applications. 
In Proceedings of the 2016 ACM International Joint Conference on Pervasive and Ubiquitous Computing (UbiComp '16). ACM, New York, NY, USA, 946-957. https://doi.org/10.1145/2971648.2971676" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 316, + 134, + 559, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 134, + 559, + 182 + ], + "spans": [ + { + "bbox": [ + 316, + 134, + 559, + 182 + ], + "type": "text", + "content": "[28] Chulhong Min, Youngki Lee, Chungkuk Yoo, Seungwoo Kang, Sangwon Choi, Pillsoon Park, Inseok Hwang, Younghyun Ju, Seungpyo Choi, and Junehwa Song. 2015. PowerForecaster: Predicting Smartphone Power Impact of Continuous Sensing Applications at Pre-installation Time. In Proceedings of the 13th ACM Conference on Embedded Networked Sensor Systems (SenSys '15). ACM, New York, NY, USA, 31-44. https://doi.org/10.1145/2809695.2809728" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 316, + 182, + 559, + 206 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 182, + 559, + 206 + ], + "spans": [ + { + "bbox": [ + 316, + 182, + 559, + 206 + ], + "type": "text", + "content": "[29] Produkt + Projekt Wolfgang Moll. [n. d.]. Age simulation suit GERT - the GERontic Test suit. ([n. d.]). Retrieved 2018-04-13 from http://www.age-simulation-suit.com/" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 316, + 206, + 559, + 222 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 206, + 559, + 222 + ], + "spans": [ + { + "bbox": [ + 316, + 206, + 559, + 222 + ], + "type": "text", + "content": "[30] Alan Newell and Peter Gregor. 1988. Human computer interaction for people with disabilities. (1988)." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 316, + 222, + 559, + 247 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 222, + 559, + 247 + ], + "spans": [ + { + "bbox": [ + 316, + 222, + 559, + 247 + ], + "type": "text", + "content": "[31] Alan F Newell, Peter Gregor, Maggie Morgan, Graham Pullin, and Catriona Macaulay. 2011. User-sensitive inclusive design. Universal Access in the Information Society 10, 3 (2011), 235-243." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 316, + 247, + 559, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 247, + 559, + 270 + ], + "spans": [ + { + "bbox": [ + 316, + 247, + 559, + 270 + ], + "type": "text", + "content": "[32] Nvidia. 2016. GeForce GTX 1080 Specifications. (2016). Retrieved 2018-04-13 from https://www.geforce.com/hardware/Desktop-gpus/geforce-gtx-1080/ specifications" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 316, + 270, + 559, + 294 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 270, + 559, + 294 + ], + "spans": [ + { + "bbox": [ + 316, + 270, + 559, + 294 + ], + "type": "text", + "content": "[33] National Institute on Aging. 2016. World's older population grows dramatically. (28 March 2016). Retrieved 2018-04-13 from https://www.nih.gov/news-events/news-releases/worlds-older-population-grows-dramatically" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 316, + 294, + 559, + 310 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 294, + 559, + 310 + ], + "spans": [ + { + "bbox": [ + 316, + 294, + 559, + 310 + ], + "type": "text", + "content": "[34] DG Pelli, JG Robson, et al. 1988. The design of a new letter chart for measuring contrast sensitivity. In Clinical Vision Sciences. CiteSeer." 
+ } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 316, + 310, + 559, + 334 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 310, + 559, + 334 + ], + "spans": [ + { + "bbox": [ + 316, + 310, + 559, + 334 + ], + "type": "text", + "content": "[35] Android Open Source Project. 2017. SurfaceFlinger and HardwareComposer. (March 2017). Retrieved 2018-04-13 from https://source.android.com/devices/ graphics/arch-sf-hwc" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 316, + 334, + 559, + 366 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 334, + 559, + 366 + ], + "spans": [ + { + "bbox": [ + 316, + 334, + 559, + 366 + ], + "type": "text", + "content": "[36] Vaibhav Rastogi, Yan Chen, and William Enck. 2013. AppsPlayground: Automatic Security Analysis of Smartphone Applications. In Proceedings of the Third ACM Conference on Data and Application Security and Privacy (CODASPY '13). ACM, New York, NY, USA, 209-220. https://doi.org/10.1145/2435349.2435379" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 316, + 366, + 559, + 406 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 366, + 559, + 406 + ], + "spans": [ + { + "bbox": [ + 316, + 366, + 559, + 406 + ], + "type": "text", + "content": "[37] Lenin Ravindranath, Suman Nath, Jitendra Padhye, and Hari Balakrishnan. 2014. Automatic and Scalable Fault Detection for Mobile Applications. In Proceedings of the 12th Annual International Conference on Mobile Systems, Applications, and Services (MobiSys '14). ACM, New York, NY, USA, 190-203. https://doi.org/10.1145/2594368.2594377" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 316, + 406, + 559, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 406, + 559, + 430 + ], + "spans": [ + { + "bbox": [ + 316, + 406, + 559, + 430 + ], + "type": "text", + "content": "[38] IBM Accessibility Research. 2017. IBM Accessibility Checklist 7.0. (18 July 2017). Retrieved 2018-04-13 from http://www-03.ibm.com/able/guidelines/ci162/accessibility_checklist.html" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 316, + 430, + 559, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 430, + 559, + 453 + ], + "spans": [ + { + "bbox": [ + 316, + 430, + 559, + 453 + ], + "type": "text", + "content": "[39] Justin B. Rousek, Sonja Koneczny, and M. Susan Hallbeck. 2009. Simulating Visual Impairment to Detect Hospital Wayfinding Difficulties. Proceedings of the Human Factors and Ergonomics Society Annual Meeting 53, 8 (Oct. 2009), 531-535." + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 316, + 453, + 559, + 477 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 453, + 559, + 477 + ], + "spans": [ + { + "bbox": [ + 316, + 453, + 559, + 477 + ], + "type": "text", + "content": "[40] Samsung. 2014. Samsung Galaxy S5 Specifications. (2014). Retrieved 2018-04-13 from http://www.samsung.com/uk/smartphones/galaxy-s5-g900f/SM-G900FZKABTU/" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 316, + 477, + 559, + 509 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 477, + 559, + 509 + ], + "spans": [ + { + "bbox": [ + 316, + 477, + 559, + 509 + ], + "type": "text", + "content": "[41] Alvy Ray Smith and James F. Blinn. 1996. Blue Screen Matting. In Proceedings of the 23rd Annual Conference on Computer Graphics and Interactive Techniques (SIGGRAPH '96). ACM, New York, NY, USA, 259-268. 
https://doi.org/10.1145/237170.237263" + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 316, + 509, + 559, + 525 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 509, + 559, + 525 + ], + "spans": [ + { + "bbox": [ + 316, + 509, + 559, + 525 + ], + "type": "text", + "content": "[42] Herman Snellen. 1873. Probebuchstaben zur bestimmung der sehscharfe. Vol. 1. H. Peters." + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 316, + 525, + 559, + 556 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 525, + 559, + 556 + ], + "spans": [ + { + "bbox": [ + 316, + 525, + 559, + 556 + ], + "type": "text", + "content": "[43] G. J. Sullivan, J. R. Ohm, W. J. Han, and T. Wiegand. 2012. Overview of the High Efficiency Video Coding (HEVC) Standard. IEEE Transactions on Circuits and Systems for Video Technology 22, 12 (Dec 2012), 1649-1668. https://doi.org/10.1109/TCSVT.2012.2221191" + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 316, + 556, + 559, + 572 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 556, + 559, + 572 + ], + "spans": [ + { + "bbox": [ + 316, + 556, + 559, + 572 + ], + "type": "text", + "content": "[44] Unity Technologies. [n. d.]. Unity. ([n. d.]). Retrieved 2018-04-13 from https://unity3d.com/" + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 316, + 572, + 559, + 605 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 572, + 559, + 605 + ], + "spans": [ + { + "bbox": [ + 316, + 572, + 559, + 605 + ], + "type": "text", + "content": "[45] Sam Tregillus and Eelke Folmer. 2016. VR-STEP: Walking-in-Place Using Inertial Sensing for Hands Free Navigation in Mobile VR Environments. In Proceedings of the 2016 CHI Conference on Human Factors in Computing Systems (CHI '16). ACM, New York, NY, USA, 1250-1255. https://doi.org/10.1145/2858036.2858084" + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 316, + 605, + 559, + 629 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 605, + 559, + 629 + ], + "spans": [ + { + "bbox": [ + 316, + 605, + 559, + 629 + ], + "type": "text", + "content": "[46] Zhou Wang, Alan C Bovik, Hamid R Sheikh, and Eero P Simoncelli. 2004. Image quality assessment: from error visibility to structural similarity. IEEE transactions on image processing 13, 4 (2004), 600-612." + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 316, + 628, + 559, + 653 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 628, + 559, + 653 + ], + "spans": [ + { + "bbox": [ + 316, + 628, + 559, + 653 + ], + "type": "text", + "content": "[47] Fabian Werfel, Roman Wiche, Jochen Feitsch, and Christian Geiger. 2016. Empathizing Audiovisual Sense Impairments: Interactive Real-Time Illustration of Diminished Sense Perception. In Proc. of AH." + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 316, + 653, + 559, + 693 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 653, + 559, + 693 + ], + "spans": [ + { + "bbox": [ + 316, + 653, + 559, + 693 + ], + "type": "text", + "content": "[48] Yu Zhong, Astrid Weber, Casey Burkhardt, Phil Weaver, and Jeffrey P. Bigham. 2015. Enhancing Android Accessibility for Users with Hand Tremor by Reducing Fine Pointing and Steady Tapping. In Proceedings of the 12th Web for All Conference (W4A '15). ACM, New York, NY, USA, Article 29, 10 pages. 
https://doi.org/10.1145/2745555.2747277" + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 316, + 692, + 559, + 709 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 692, + 559, + 709 + ], + "spans": [ + { + "bbox": [ + 316, + 692, + 559, + 709 + ], + "type": "text", + "content": "[49] George J. Zimmerman. 1979. Zimmerman Low Vision Simulation Kit. (1979). Retrieved 2018-04-13 from http://www.lowvisionsimulationkit.com/" + } + ] + } + ], + "index": 44 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 58, + 282, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 58, + 282, + 69 + ], + "spans": [ + { + "bbox": [ + 52, + 58, + 282, + 69 + ], + "type": "text", + "content": "Empath-D: VR-based Empathetic App Design for Accessibility" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 374, + 58, + 558, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 374, + 58, + 558, + 69 + ], + "spans": [ + { + "bbox": [ + 374, + 58, + 558, + 69 + ], + "type": "text", + "content": "MobiSys '18, June 10-15, 2018, Munich, Germany" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2503_12xxx/2503.12937/939affdd-0491-441c-956b-3cebb8540abd_content_list.json b/data/2025/2503_12xxx/2503.12937/939affdd-0491-441c-956b-3cebb8540abd_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..087668607cadc3d98b8e979ff125bdec377dddba --- /dev/null +++ b/data/2025/2503_12xxx/2503.12937/939affdd-0491-441c-956b-3cebb8540abd_content_list.json @@ -0,0 +1,1741 @@ +[ + { + "type": "text", + "text": "R1-VL: Learning to Reason with Multimodal Large Language Models via Step-wise Group Relative Policy Optimization", + "text_level": 1, + "bbox": [ + 127, + 128, + 870, + 176 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Jingyi Zhang Jiaxing Huang Huanjin Yao Shunyu Liu Xikun Zhang Shijian Lu Dacheng Tao Nanyang Technological University, Singapore", + "bbox": [ + 99, + 210, + 895, + 250 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 248, + 282, + 326, + 299 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recent studies generally enhance MLLMs' reasoning capabilities via supervised fine-tuning on high-quality chain-of-thought reasoning data, which often leads models to merely imitate successful reasoning paths without understanding what the wrong reasoning paths are. In this work, we aim to enhance the MLLMs' reasoning ability beyond passively imitating positive reasoning paths. To this end, we design Step-wise Group Relative Policy Optimization (StepGRPO), a new online reinforcement learning framework that enables MLLMs to self-improve reasoning ability via simple, effective and dense step-wise rewarding. Specifically, StepGRPO introduces two novel rule-based reasoning rewards: Step-wise Reasoning Accuracy Reward (StepRAR) and Step-wise Reasoning Validity Reward (StepRVR). StepRAR rewards the reasoning paths that contain necessary intermediate reasoning steps via a soft key-step matching technique, while StepRVR rewards reasoning paths that follow a well-structured and logically consistent reasoning process through a reasoning completeness and logic evaluation strategy. 
With the proposed StepGRPO, we introduce R1-VL, a series of MLLMs with outstanding capabilities in step-by-step reasoning. Extensive experiments over 8 benchmarks demonstrate the superiority of our methods. Code is available at link.", + "bbox": [ + 89, + 316, + 483, + 680 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 91, + 726, + 220, + 742 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Multimodal large language models (MLLMs) have achieved significant progress in vision-language understanding [1, 8, 15, 18, 21, 38, 43, 51]. Recent efforts generally enhance MLLMs' reasoning capabilities by employing supervised fine-tuning (SFT) on high-quality chain-of-thought (CoT) reasoning data generated by powerful models (e.g., GPT4) [37, 44, 46, 55]. For example, Mulberry [46] introduces CoMCTS, which utilizes multiple", + "bbox": [ + 89, + 752, + 482, + 875 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/8f872592d2440c83707b4c948838641a3c2d1471896f5cd20dd8fca83cbb0a62.jpg", + "image_caption": [ + "Figure 1. For MLLMs, online reinforcement learning with outcome-level reward, like in Deepseek-R1's GRPO [34], often suffers from sparse reward issues, where only a few reasoning paths can receive positive/high rewards during training, ultimately leading to poor exploration efficiency and unstable learning process. To tackle this, we propose a novel online reinforcement learning framework that incorporates step-wise reasoning rewards in addition to outcome-level rewards, encouraging MLLMs to iteratively refine their reasoning with dense rewards and resulting in a more stable training process and improved reasoning capability. The experiments are conducted on Qwen2-VL-7b over MathVista." + ], + "image_footnote": [], + "bbox": [ + 540, + 282, + 883, + 452 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "models to collectively search and identify effective reasoning paths, followed by SFT on the collected reasoning data. However, SFT approaches focus solely on positive reasoning paths (i.e., those leading to correct answers), while the negative reasoning paths are largely neglected. This limitation may cause the model to merely imitate successful reasoning paths without understanding what the flawed and wrong reasoning paths are.", + "bbox": [ + 511, + 652, + 906, + 776 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In this work, we aim to enhance the MLLMs' reasoning ability beyond passively imitating positive reasoning paths. Recent advancements in NLP, such as Deepseek-R1 [13] and Kimi-K1.5 [36], have shown great potential in incentivizing the reasoning capability of LLMs via actively selfexploring. The core design of these advances (e.g., GRPO in Deepseek-R1) lies in online reinforcement learning without the need for reward models, which encourages an LLM", + "bbox": [ + 511, + 779, + 908, + 902 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2503.12937v2 [cs.AI] 4 Aug 2025", + "bbox": [ + 22, + 284, + 60, + 710 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "Correspondence to: Jiaxing Huang {jiaxing.huang@ntu.edu.sg}.", + "bbox": [ + 112, + 886, + 457, + 901 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "to generate a group of reasoning paths and iteratively refine its reasoning process by rewarding the generated reasoning paths based on a rule-based reward function. 
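For intuition, such a rule-based check can be as simple as the following sketch (a simplified, hypothetical illustration; the helper names and the answer-marker convention are assumptions, not taken from the cited systems):

# Rule-based, outcome-level reward in the style of GRPO: a sampled
# reasoning path is scored only by whether its final answer is correct.
def extract_final_answer(path: str) -> str | None:
    # Assumes answers are stated as "The final answer is X."
    marker = "The final answer is"
    if marker not in path:
        return None  # no parseable final answer
    return path.rsplit(marker, 1)[-1].strip(" :.\n")

def outcome_level_reward(path: str, ground_truth: str) -> float:
    answer = extract_final_answer(path)
    if answer is None:
        return 0.0
    return 1.0 if answer == ground_truth else 0.0

# With a weak policy, most sampled paths score 0.0 -> sparse reward.
print(outcome_level_reward("Step 1: ... The final answer is 3.", "3"))  # 1.0

Under a weak policy most sampled paths earn 0.0 here, which is exactly the sparse-reward failure mode that motivates the step-wise rewards introduced later.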
Typically, an outcome-level reward strategy is used: reasoning paths leading to correct answers receive higher rewards, while those leading to incorrect answers receive lower ones.", + "bbox": [ + 89, + 90, + 480, + 181 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "An intuitive idea is to directly apply these simple and effective LLM online reinforcement learning methods for MLLMs. However, relying solely on outcome-level rewards, like in Deepseek-R1's GRPO, often suffers from sparse reward issues on MLLM reasoning learning, resulting in suboptimal performance. Specifically, most MLLMs, especially smaller ones, exhibit very limited capability in long-chain reasoning accuracy and validity, whereas only a few MLLM-generated reasoning paths can receive positive/high rewards. This lack of positive reward signals reduces exploration efficiency and leads to an unstable learning process, as illustrated in Fig. 1.", + "bbox": [ + 89, + 184, + 480, + 364 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We propose to tackle this sparse reward issue by introducing dense step-wise reasoning rewards in addition to sparse outcome-level rewards. To this end, we design Stepwise Group Relative Policy Optimization (StepGRPO), a new online reinforcement learning framework that enables MLLMs to self-improve reasoning ability via simple, effective and dense step-wise rewarding while using no additional process reward models. Specifically, StepGRPO introduces two novel rule-based reasoning reward mechanisms: Step-wise Reasoning Accuracy Reward (StepRAR) and Step-wise Reasoning Validity Reward (StepRVR).", + "bbox": [ + 89, + 367, + 480, + 532 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "StepRAR rewards the reasoning path using a soft key-step matching technique that evaluates whether the reasoning path contains key intermediate reasoning steps (i.e., the necessary steps to reach the correct final solution). StepRVR rewards the reasoning path based on a reasoning completeness and logic evaluation method, which assesses whether the reasoning process is well-structured and logically consistent. In this way, StepRAR and StepRVR help mitigate the sparse reward issue by providing informative rewards, even when the reasoning path does not produce the correct final answer – as long as it includes key intermediate reasoning steps or follows a structured and logical reasoning process. With StepRAR and StepRVR, StepGRPO takes the average step-wise reasoning rewards of a group of sampled reasoning paths as a baseline to estimate the advantage for policy optimization. Using the proposed StepGRPO, we develop R1-VL, a series of MLLMs with R1-like step-by-step reasoning capabilities.", + "bbox": [ + 89, + 535, + 482, + 806 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The proposed StepGRPO offers two key advantages. 1) Effectiveness. StepGRPO introduces two step-wise reasoning reward mechanisms with group relative optimization, which provide rich and fine-grained step-wise reasoning rewards along the whole reasoning trajectory beyond the final answer. This mitigates the sparse reward issue and encour", + "bbox": [ + 89, + 810, + 480, + 900 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "ages more structured, logically consistent reasoning trajectories. 2) Efficiency. StepGRPO achieves step-wise reasoning rewarding in a rule-based manner, which provides step-wise reasoning rewards while eliminating the need of process reward models. 
This significantly reduces computational overhead while maintaining fine-grained step-wise supervision.", + "bbox": [ + 511, + 90, + 903, + 196 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The main contributions of this work are fourfold. First, we propose StepGRPO, a new online reinforcement learning framework that enables MLLMs to self-improve reasoning ability via a simple, effective and dense step-wise rewarding. Second, we design two novel rule-based reasoning reward mechanisms, i.e., step-wise reasoning accuracy reward and step-wise reasoning validity reward, which effectively mitigate the sparse reward issue for MLLMs without the need of process reward models. Third, with the proposed StepGRPO, we develop R1-VL, a series of MLLMs that have superior reasoning capabilities. Fourth, extensive experiments over multiple benchmarks show that R1-VL achieves superior performance compared with state-of-the-art MLLMs.", + "bbox": [ + 511, + 198, + 903, + 407 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 511, + 424, + 653, + 440 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1. Multimodal Large Language Model", + "text_level": 1, + "bbox": [ + 511, + 450, + 823, + 465 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Multimodal Large Language Models (MLLMs) [1, 8, 15, 18, 21, 38, 43, 51, 52] have shown remarkable advancements across a wide range of vision-language understanding tasks, demonstrating their capabilities in comprehending and analyzing visual content across various application domains. Early research on MLLMs primarily focuses on text generation based on text prompts and input modalities such as images [20, 21, 53] and videos [9, 35]. Recent advancements further enhance the capabilities of MLLMs from various aspects. For example, recent models [25, 42] incorporate multimodal inputs and outputs such as video, audio, and point cloud inputs beyond text and images. In addition, some efforts attempt to adapt MLLMs for domain-specific tasks, such as medical image understanding [17, 19, 56] and document analysis [22, 49]. In this work, we focus on enhancing the reasoning ability of MLLMs in tackling complex reasoning tasks and introduce R1-VL, a series of MLLMs that have superior reasoning capability.", + "bbox": [ + 511, + 473, + 903, + 760 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.2. MLLM Reasoning", + "text_level": 1, + "bbox": [ + 511, + 771, + 692, + 787 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Inspired by the advances in NLP that show great potential in learning to reason and tackling complex language tasks [29], recent studies attempt to enhance the reasoning capability of MLLMs. Generally, current MLLM reasoning methods improve the reasoning capability of MLLMs by generating high-quality chain-of-thought (CoT) data using powerful models (e.g., GPT-4) and performing supervised", + "bbox": [ + 511, + 794, + 903, + 902 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "fine-tuning with the collected data [10, 37, 44, 46, 55]. For example, Mulberry [46] introduces Collective Monte Carlo Tree Search (MCTS) into MLLMs and proposes CoMCTS, which leverages complementary knowledge from multiple models to collaboratively search and identify effective reasoning paths. In addition, recent works [14, 27, 30, 47] attempt to explore online reinforcement learning to improve the MLLMs' reasoning ability. 
Different from these works, we design StepGRPO that enables MLLM to self-improve the reasoning ability with step-wise reward signals.", + "bbox": [ + 89, + 90, + 480, + 242 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.3. Reinforcement Learning", + "text_level": 1, + "bbox": [ + 89, + 251, + 316, + 267 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Reinforcement Learning (RL) [16] is a fundamental approach in machine learning, where an agent learns to interact with an environment by taking actions, receiving rewards, and updating its policy to maximize the long-term return. With the rise of large language models (LLMs) [4, 28, 31], Reinforcement Learning with Human Feedback (RLHF) [3] has emerged as a key technique for fine-tuning models using human preference data. RLHF leverages algorithms like Proximal Policy Optimization (PPO) [33] and Direct Preference Optimization (DPO) [32] to guide model behavior for improving the alignment, coherence and helpfulness in response generation.", + "bbox": [ + 89, + 272, + 482, + 454 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Recently, RL is increasingly adopted to enhance LLMs' reasoning capabilities [5, 7, 13, 24, 36, 50], especially for mathematical problem solving. The core is to adopt an appropriate reward function or model that evaluates and reinforces high-quality reasoning paths while penalizing low-quality ones, guiding the model's optimization towards more structured and coherent reasoning trajectories using the RL algorithm. For example, ReST-MCTS* [50] trains a process reward model (PRM) for determining the correctness of each reasoning step within reasoning paths. Recent methods have found that using a simple outcome-level rule-based reward function (i.e., the reasoning trajectories leading to correct answer are rewarded with higher score) can already provide an effective and reliable reward signal during the RL process [13, 24, 36]. For example, DeepSeek-R1 [13] demonstrates that group relative policy optimization (GRPO) [34] with outcome-level reward effectively enhances the reasoning capability of LLMs. In this work, we aim for improving the reasoning capability of MLLMs through reinforcement learning and propose StepGRPO, which effectively tackles the sparse reward issue in MLLMs, leading to stable training process and better reasoning capability.", + "bbox": [ + 89, + 454, + 482, + 801 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Method", + "text_level": 1, + "bbox": [ + 89, + 814, + 181, + 830 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "This section first presents the task formulation, and then introduces the proposed Step-wise Group Relative Policy Optimization (StepGRPO). More details to be elaborated in the ensuing subsections.", + "bbox": [ + 89, + 839, + 482, + 901 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Task Formulation", + "text_level": 1, + "bbox": [ + 513, + 90, + 687, + 104 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In this paper, we consider a pre-trained MLLM and denote it as a policy model $\\pi_{\\theta}$ . Given a multimodal question $Q$ consisting of an image and a textual task instruction, i.e., $Q = \\{\\text{text}, \\text{image}\\}$ , the policy model $\\pi$ generates response $\\mathbf{c}$ with a step-by-step reasoning trajectory. 
Generally, this process can be formulated as a sequence of next token prediction actions, i.e., $\\mathbf{c} = (a_1, a_2, \\dots, a_t, \\dots, a_T)$ , where each action $a_t$ is sampled from the policy model $\\pi_{\\theta}$ and $T$ represents the maximum sequence length. After each action, the new state $s_{t+1}$ is determined by updating the current state $s_t$ with the newly generated action $a_t$ , i.e., $s_{t+1} = (s_t, a_t)$ , $1 \\leq t \\leq T$ .", + "bbox": [ + 511, + 112, + 903, + 294 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Considering this formulation, the objective of our task is to optimize the policy model $\\pi_{\\theta}$ such that it can select better actions based on the previous states, thereby improving reasoning quality. In the context of reinforcement learning (RL), the policy model is generally optimized by maximizing the cumulative reward, where the reward for taking action $a_{t}$ at state $s_t$ is denoted as $r(s_t,a_t,s_{t + 1})$ . Following prior studies [46], we define an action in this paper as generating a reasoning step, which consists of one or more sentences containing multiple word tokens.", + "bbox": [ + 511, + 294, + 905, + 445 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2. Step-wise Group Relative Policy Optimization", + "text_level": 1, + "bbox": [ + 511, + 453, + 901, + 469 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We propose Step-wise Group Relative Policy Optimization (StepGRPO), a novel online reinforcement fine-tuning framework that mitigates the sparse reward issue for MLLMs and encourages self-improvement in reasoning ability through simple, effective and dense step-wise reward mechanisms. As illustrated in Fig. 2, StepGRPO consists of two phases: (1) a policy warm-up phase and (2) a step-wise online policy optimization phase. The overall algorithm is shown in Algorithm 1.", + "bbox": [ + 511, + 474, + 905, + 611 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2.1. Policy Warm-up", + "text_level": 1, + "bbox": [ + 511, + 619, + 671, + 633 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "This phase equips the policy model with fundamental reasoning capabilities, ensuring it can generate proper stepwise reasoning paths before reinforcement learning. During the warm-up phase, the policy model is fine-tuned using a multimodal dataset $D_{s}$ with Chain-of-Thought (CoT) reasoning path, where each data consists of a multimodal question $Q$ and a step-by-step reasoning path $\\tau$ , i.e., $D_{s} = \\{Q^{n}, \\tau^{n}\\}_{n=1}^{N}$ :", + "bbox": [ + 511, + 638, + 903, + 760 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\text {w a r m - u p}} = - \\mathbb {E} _ {\\tau \\sim D _ {s}} [ \\sum_ {t = 1} ^ {T} \\log (\\pi_ {\\theta} (a _ {t} | s _ {t})) ]. \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 562, + 768, + 903, + 811 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2.2. Step-wise Online Policy Optimization", + "text_level": 1, + "bbox": [ + 511, + 821, + 818, + 835 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "This phase enables MLLMs to self-improve their reasoning ability via online reinforcement learning, mitigating the sparse reward issue through step-wise reasoning rewards. As illustrated in Fig. 
2, for each question $Q \in D_{s}$ ,", + "bbox": [ + 511, + 839, + 903, + 901 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/505609ce5b30e24850e3d0b33b9faa0f2d7fbcfed05b7deb464216876e31c18e.jpg", + "image_caption": [ + "Question: In the given diagram, triangle ABC has AD as its median and point E is the midpoint of AD. If the area of triangle ABC is 12, what is the area of triangle ABE?" + ], + "image_footnote": [], + "bbox": [ + 99, + 88, + 205, + 146 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/7ad59bbf786298ad029c17f7fc43fbbfc0ac2a40931846c3527455d40fe2fdb1.jpg", + "image_caption": [ + "Answer: Step 1: Since AD is a median, it divides triangle ABC into two equal areas: ABD and ACD. Step 2: Segment AE is half of AD, splitting triangle ABD into two triangles of equal area: ABE and BED. Step 3: The area of triangle ABD is half of triangle ABC, which is $\frac{12}{2} = 6$ . Step 4: Since E is the midpoint of AD, triangle ABE is half of triangle ABD. Therefore, the area of triangle ABE is $\frac{6}{2} = 3$ . The final answer is 3.", + "(a) Step-wise Reasoning Accuracy Reward", + "Figure 2. Overview of the proposed StepGRPO. StepGRPO consists of two phases: a policy warm-up phase and a step-wise online policy optimization phase. After the warm-up, the policy model $\pi_{\theta}$ generates a group of reasoning paths $\{\mathbf{c}^i\}_{i=1}^M$ and assigns step-wise rewards using two proposed mechanisms: Step-wise Reasoning Accuracy Reward (StepRAR) and Step-wise Reasoning Validity Reward (StepRVR). StepRAR rewards reasoning paths that contain key intermediate steps, identified using a soft key-step matching technique. StepRVR rewards reasoning paths based on completeness and logical consistency, ensuring they are well-structured. StepGRPO then estimates the advantage $\hat{A}$ for policy optimization by using the average step-wise reasoning reward of a group of sampled reasoning paths as a baseline. Examples for StepRAR and StepRVR are illustrated in (a) and (b), respectively." + ], + "image_footnote": [], + "bbox": [ + 99, + 152, + 893, + 308 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Pre-extracted key steps with Augmentations:", + "text_level": 1, + "bbox": [ + 106, + 330, + 259, + 354 + ], + "page_idx": 3 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. AD is a median; median is $AD$", + "2. equal area; ...", + "3. AE is half of AD; $AE = 1/2 AD$", + "4. $\frac{12}{2} = 6$; $12/2 = 6$, ...", + "5. E is the midpoint; ...", + "6. $\frac{6}{2} = 3$; $6/2 = 3$, ..." + ], + "bbox": [ + 104, + 357, + 259, + 420 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Soft key-step matching:", + "text_level": 1, + "bbox": [ + 274, + 330, + 405, + 342 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Description: The image shows ...; #Rationale: The question asks for the area...; #Step1: ... we find AD is a median of ...; #Step2: ... AE splits triangle ABD ...; #Step3: ... The area of triangle ABD is $12/2 = 6$ , ..., and the area of triangle ABE is $\frac{6}{2} = 3$. #The final answer is: 3. 
Step-wise Matching score: 3/6", + "bbox": [ + 274, + 342, + 519, + 422 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "(b) Step-wise Reasoning Validity Reward", + "text_level": 1, + "bbox": [ + 527, + 314, + 746, + 325 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Description $\\rightarrow$ #Rationale $\\rightarrow$ # Step1 $\\rightarrow$ ... $\\rightarrow$ #Step $N\\rightarrow$ #Answer.", + "bbox": [ + 532, + 330, + 885, + 343 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "i. Reasoning completeness", + "bbox": [ + 532, + 345, + 668, + 356 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Description $\\rightarrow$ #Rationale $\\rightarrow$ #Answer. Missing reasoning steps", + "bbox": [ + 532, + 357, + 877, + 369 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Description $\\rightarrow$ # Step1 $\\rightarrow$ ... $\\rightarrow$ #Step $N\\rightarrow$ #Answer. Missing rationale", + "bbox": [ + 532, + 369, + 895, + 383 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "ii. Reasoning logic", + "bbox": [ + 532, + 386, + 629, + 397 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Description $\\rightarrow$ #Rationale $\\rightarrow$ #Answer $\\rightarrow$ #Step1... $\\rightarrow$ #StepN. X \n#Description $\\rightarrow$ #Step3 $\\rightarrow$ #Rationale $\\rightarrow$ ... $\\rightarrow$ #Step I $\\rightarrow$ #Answer X", + "bbox": [ + 532, + 397, + 885, + 422 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "the policy model $\\pi_{\\theta}$ first generates a group of $M$ reasoning trajectories via multiple rollouts, i.e., $\\{\\mathbf{c}^i\\}_{i=1}^M$ , where $\\mathbf{c}^i = (a_1^i, a_2^i, \\ldots, a_t^i, \\ldots, a_T^i)$ . After obtaining a group of $M$ reasoning trajectories, we employ our proposed step-wise reasoning rewards to evaluate and reward each generated reasoning trajectory. Specifically, we introduce two types of rule-based step-wise rewards, i.e., step-wise reasoning accuracy (StepRAR) reward and step-wise reasoning validity reward (StepRVR).", + "bbox": [ + 88, + 565, + 482, + 700 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Step-wise reasoning accuracy reward (StepRAR) reduces the effect of learning from sparse reward by additionally rewarding reasoning paths that contain correct intermediate reasoning steps contributing to the final solution. Specifically, for each question $Q$ , we pre-extract a set of key reasoning steps $\\mathbf{v} = \\{v_{1}, v_{2}, \\ldots\\}$ from the corresponding reasoning path $\\tau$ in dataset $D_{s}$ . We define key steps as the essential variables and equations that directly contribute to the final solution, and prompt GPT-4 to extract several key steps from the reasoning path for each question. To ensure efficient reward assignment, we refine the extracted steps by removing redundant content and retaining only the core few words necessary for reasoning. Furthermore, we", + "bbox": [ + 88, + 704, + 482, + 900 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "augment each extracted key step into multiple equivalent formats to allow more flexible and accurate matching, preventing missed matches due to math-related formatting differences. 
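Concretely, a minimal sketch of this augment-then-match scheme (hypothetical names; a simplified stand-in for the actual matcher) that produces the match ratio $k^i$ used in Eq. (2) below:

# Soft key-step matching: each pre-extracted key step keeps several
# equivalent surface forms; k is the fraction of key steps for which
# any variant occurs verbatim in the generated reasoning path.
def match_score(path: str, key_steps: list[list[str]]) -> float:
    matched = sum(1 for variants in key_steps
                  if any(v in path for v in variants))
    return matched / len(key_steps)

def step_rar(has_answer: bool, answer_correct: bool,
             k: float, alpha: float = 0.1) -> float:
    # Mirrors Eq. (2): 1 + alpha*k if the final answer is correct,
    # alpha*k if wrong but parseable, 0 if no final answer is produced.
    if not has_answer:
        return 0.0
    return (1.0 + alpha * k) if answer_correct else alpha * k

key_steps = [["12/2 = 6", "12 divided by 2 equals 6"],
             ["6/2 = 3", "6 divided by 2 equals 3"]]
k = match_score("... so 12/2 = 6, and 6/2 = 3.", key_steps)   # k = 1.0
print(step_rar(has_answer=True, answer_correct=False, k=k))   # 0.1

Here even a path with a wrong final answer earns $\alpha k$ rather than zero, which is the densifying effect the mechanism is after.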
For example, a mathematical expression such as " $\frac{6}{3} = 2$ " is augmented to "6/3 = 2" or "6 divided by 3 equals 2".", + "bbox": [ + 511, + 565, + 906, + 655 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "With the extracted key reasoning steps $\mathbf{v} = \{v_{1}, v_{2}, \ldots\}$ and such a soft matching mechanism, we calculate a match score for each generated reasoning path based on the ratio of matched key steps, i.e., $k^{i} = |\mathbf{v}_{\text{match}}| / |\mathbf{v}|$ . Then, StepRAR for $1 \leq t \leq T$ is defined as:", + "bbox": [ + 511, + 655, + 906, + 731 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nr_{acc}^{i}(s_{t}, a_{t}, s_{t+1}) = \left\{ \begin{array}{ll} 1 + \alpha k^{i}, & \operatorname{ans}(s_{t+1}) = y, \\ \alpha k^{i}, & \operatorname{ans}(s_{t+1}) \neq \text{null}, \neq y, \\ 0, & \operatorname{ans}(s_{t+1}) = \text{null}, \end{array} \right. \tag{2}\n$$\n", + "text_format": "latex", + "bbox": [ + 517, + 739, + 903, + 809 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $y$ is the ground-truth answer extracted from the CoT reasoning path.", + "bbox": [ + 511, + 810, + 903, + 840 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "By leveraging pre-extracted key reasoning steps, StepRAR efficiently provides additional supervision with a simple soft matching mechanism, ensuring the model learns meaningful reasoning processes instead of guessing", + "bbox": [ + 511, + 840, + 905, + 901 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "answers randomly.", + "bbox": [ + 89, + 90, + 217, + 104 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Step-wise reasoning validity reward (StepRVR) aims to ensure that the generated paths adhere to a logically structured and coherent progression beyond the reasoning accuracy. Prior studies [44, 46] have demonstrated that structural reasoning, such as problem decomposition and progressive reasoning, facilitates more accurate and interpretable reasoning processes, as it encourages models to break down complex problems into multiple intermediate steps rather than direct answer generation.", + "bbox": [ + 89, + 106, + 483, + 242 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Inspired by these findings, we incorporate step-wise reasoning validity to reinforce well-organized reasoning paths that follow an expected logical flow. Specifically, we define StepRVR using two key criteria: reasoning completeness $\delta^c$ and reasoning logic $\delta^l$ . Reasoning completeness requires the response to include three essential components, i.e., a background analysis involving image description and rationale analysis to establish context, a step-by-step reasoning process and a final answer. 
In addition to the reasoning completeness, reasoning logic ensures that the reasoning path follows a logical progression, where the background analysis must come before solution steps and the final answer should only appear after reasoning steps are complete.", + "bbox": [ + 89, + 244, + 483, + 440 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "With these two criteria, we define StepRVR as", + "bbox": [ + 109, + 441, + 418, + 455 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nr_{val}^{i}(s_{t}, a_{t}, s_{t+1}) = \left\{ \begin{array}{ll} 1, & \mathbb{I}(\delta^{c}(s_{t+1})) \cdot \mathbb{I}(\delta^{l}(s_{t+1})) = 1, \\ 0, & \text{otherwise,} \end{array} \right. \tag{3}\n$$\n", + "text_format": "latex", + "bbox": [ + 96, + 469, + 483, + 521 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where the reasoning trajectory is rewarded only if it satisfies both completeness and logical coherence. By enforcing this, StepRVR helps the model produce structured, interpretable and logically sound reasoning trajectories, enhancing both the quality and reliability of generated responses.", + "bbox": [ + 89, + 522, + 483, + 598 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Optimization with the step-wise rewards. After obtaining the two types of step-wise rewards, we compute the overall reward for each reasoning path as $r^i = r_{acc}^i + r_{val}^i$ , and repeatedly compute the rewards for all generated reasoning paths, i.e., $\{r^1, r^2, \dots, r^M\}$ .", + "bbox": [ + 89, + 599, + 483, + 675 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To estimate the advantage of each reasoning trajectory, we normalize its reward relative to the group as follows:", + "bbox": [ + 89, + 676, + 482, + 705 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\hat{A}^{i} = \frac{r^{i} - \operatorname{mean}(\{r^{1}, r^{2}, \dots, r^{M}\})}{\operatorname{std}(\{r^{1}, r^{2}, \dots, r^{M}\})}, \tag{4}\n$$\n", + "text_format": "latex", + "bbox": [ + 166, + 717, + 482, + 753 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where the mean group reward serves as the baseline, and $\hat{A}^i$ measures how much better or worse $r^i$ is compared to other reasoning trajectories within the group. Following this, we optimize the policy model with the loss defined as:", + "bbox": [ + 89, + 767, + 483, + 829 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\mathcal{L}_{\text{StepRL}} = -\mathbb{E}_{Q \in D_{s}} \left[ \frac{1}{M} \sum_{i=1}^{M} \left( \frac{\pi_{\theta}(\mathbf{c}^{i} \mid Q)}{\left[ \pi_{\theta}(\mathbf{c}^{i} \mid Q) \right]_{\text{nograd}}} \hat{A}^{i} - \beta D_{KL}(\pi_{\theta} \,\|\, \pi_{ref}) \right) \right], \tag{5}\n$$\n", + "text_format": "latex", + "bbox": [ + 116, + 842, + 482, + 902 + ], + "page_idx": 4 + }, + { + "type": "code", + "sub_type": "algorithm", + "code_caption": [ + "Algorithm 1 Step-wise Group Relative Policy Optimization" + ], + "code_body": "Input: Policy model $\pi_{\theta}$ initialized by a pre-trained \nMLLM; a multimodal dataset $D_{s} = \{Q^{n},\tau^{n}\}_{n = 1}^{N}$ \nOutput: Trained policy model $\pi_{\theta}$ \nPolicy warm-up: \nfor iter $= 1$ to $N$ do Sample $\{Q,\tau \} \in D_s$ Optimize policy model $\pi_{\theta}$ by Eq. 1 \nend for \nStep-wise online policy optimization: \nfor iter $= 1$ to $N$ do Sample $\{Q,\tau \} \in D_s$ Generate a group of reasoning paths $\{\mathbf{c}^i\}_{i = 1}^M\sim \pi_\theta$ Obtain step-wise rewards $\{r^i\}_{i = 1}^M$ by Eqs. 2-3 Obtain relative advantages $\{\hat{A}^i\}_{i = 1}^M$ by Eq. 4 Optimize policy model $\pi_{\theta}$ by Eqs. 5-6 \nend for \nreturn policy model $\pi_{\theta}$", + "bbox": [ + 516, + 108, + 903, + 372 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where the KL divergence is adopted to regularize the policy model, preventing excessive deviation from the reference model. The reference model is typically initialized as the same model as the policy model but remains frozen during RL training. The KL divergence between the policy model and the reference model is estimated as in [34]:", + "bbox": [ + 511, + 397, + 906, + 489 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nD_{KL}(\pi_{\theta} \,\|\, \pi_{ref}) = \frac{\pi_{ref}(\mathbf{c}^{i} \mid Q)}{\pi_{\theta}(\mathbf{c}^{i} \mid Q)} - \log \frac{\pi_{ref}(\mathbf{c}^{i} \mid Q)}{\pi_{\theta}(\mathbf{c}^{i} \mid Q)} - 1. \tag{6}\n$$\n", + "text_format": "latex", + "bbox": [ + 521, + 498, + 906, + 532 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. Experiment", + "text_level": 1, + "bbox": [ + 513, + 541, + 638, + 558 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "This section presents the experiments, covering the datasets and implementation details, main experimental results, ablation studies and discussion, with more details described in the ensuing subsections.", + "bbox": [ + 511, + 566, + 906, + 627 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1. Datasets", + "text_level": 1, + "bbox": [ + 511, + 636, + 614, + 650 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "For policy warm-up, we adopt Mulberry-260k [46] for supervised fine-tuning. For step-wise online policy optimization, we randomly sample 10K examples from Mulberry-260k as our training data. For evaluation, we adopt 8 widely-used multimodal benchmarks for comprehensively evaluating our proposed StepGRPO, including MathVista [23], MMStar [6], Math-Vision [40], ChartQA [26], DynaMath [57], HallusionBench [12], MathVerse [54], MME [11] and MM-Reason [45]. These multimodal benchmarks cover a wide range of tasks from mathematical reasoning, chart understanding, visual hallucination and general visual understanding.", + "bbox": [ + 511, + 657, + 906, + 839 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.2. 
Implementation Details", + "text_level": 1, + "bbox": [ + 511, + 848, + 730, + 863 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Our proposed StepGRPO is generally applicable to different MLLMs. In our experiments, we adopt two state-of-the-art", + "bbox": [ + 511, + 869, + 906, + 900 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/08a395e3dfac9af5c8b9b4bcea8c772a6dd987bb1bf0054d2e9907a99f461acd.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Method | MathVista | MMStar | Math-V | ChartQA | DynaMath | HallBench | MathVerse | MME (sum) | MMReason | AVG
Closed-Source Model
GPT-4o [15] | 63.8 | 63.9 | 30.3 | 85.7 | 63.7 | 55.0 | 39.4 | 2329 | 21.1 | 56.2
Claude-3.5 Sonnet [1] | 67.7 | 62.2 | - | 90.8 | 64.8 | 55.0 | - | 1920 | - | -
Open-Source Model
Cambrain-1-8B [38] | 49.0 | - | - | 73.3 | - | - | - | - | - | -
MM-1.5-7B [51] | 47.6 | - | - | 78.6 | - | - | - | 1861 | - | -
Idefics3-LLaMA3-8B [18] | 58.4 | 55.9 | - | 74.8 | - | - | - | 1937 | - | -
InternVL2-8B [8] | 58.3 | 61.5 | - | 83.3 | 39.7 | - | - | 2210 | - | -
MiniCPM-V-2.6-8B [48] | 60.6 | 57.5 | - | - | - | 48.1 | - | 2348 | - | -
DeepSeek-VL2-MOE-4.5B [43] | 62.8 | 61.3 | - | 86.0 | - | - | - | 2253 | 11.5 | -
Reasoning Model
LLaVA-CoT-11B [44] | 54.8 | 57.6 | - | - | - | 47.8 | - | - | - | -
LLaVA-Reasoner-8B [55] | 50.6 | 54.0 | - | 83.0 | - | - | - | - | - | -
Insight-V-8B [10] | 49.8 | 57.4 | - | 77.4 | - | - | - | 2069 | - | -
Mulberry-7B [46] | 63.1 | 61.3 | - | 83.9 | 45.1 | 54.1 | - | 2396 | 11.8 | -
LlamaV-o1-11B [37] | 54.4 | 59.4 | - | - | - | 63.5 | - | - | - | -
Vision-R1-7B [14] | 73.5 | - | - | - | - | - | 52.4 | - | - | -
LMM-R1 [30] | 63.2 | 58.0 | 26.3 | - | - | - | 41.5 | - | - | -
R1-ShareVL-7B [47] | 75.4 | 67.0 | 29.5 | - | - | - | 52.8 | - | - | -
Qwen2-VL-2B [41] | 43.0 | 48.0 | 12.4 | 73.5 | 24.9 | 41.7 | 19.7 | 1872 | 7.7 | 37.5
R1-VL-2B (Ours) | 52.1 | 49.8 | 17.1 | 75.2 | 29.4 | 44.0 | 26.2 | 2048 | 8.3 | 41.6
Qwen2-VL-7B [41] | 58.2 | 60.7 | 16.3 | 83.0 | 42.1 | 50.6 | 32.5 | 2327 | 11.9 | 48.7
R1-VL-7B (Ours) | 63.5 | 60.0 | 24.7 | 83.9 | 45.2 | 54.7 | 40.0 | 2376 | 12.5 | 52.1
Qwen2.5-VL-7B [2] | 68.2 | 63.9 | 25.1 | 87.3 | 53.2 | 52.1 | 49.2 | 2347 | 17.3 | 55.5
R1-VL-7B* (Ours) | 74.3 | 66.2 | 28.2 | 87.7 | 56.5 | 57.2 | 52.2 | 2395 | 17.9 | 58.4
", + "bbox": [ + 96, + 88, + 911, + 486 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/5606d2223621250bdaa6c74f2d34a58e7f44e544758016ac5a16d0b60f1acd4f.jpg", + "table_caption": [ + "Table 1. Main experimental results. To comprehensively examine the proposed StepGRPO, we conduct extensive experiments with two baseline models on eight benchmarks, and compare StepGRPO with various state-of-the-art MLLMs.* indicates that the model is trained using Qwen2.5-VL-7B as the base model with the data from [47]." + ], + "table_footnote": [], + "table_body": "
Warm-up | Step-wise reasoning rewards: StepRAR | StepRVR | MathVista
- | - | - | 58.2
✓ | - | - | 61.2
✓ | ✓ | - | 62.4
✓ | - | ✓ | 61.9
✓ | ✓ | ✓ | 63.5
", + "bbox": [ + 101, + 563, + 478, + 681 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 2. Ablation study of StepGRPO over Qwen2-VL-7B.", + "bbox": [ + 109, + 691, + 462, + 705 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "open-source MLLMs, i.e., Qwen2-VL-2B and Qwen2-VL-7B [41]. For the policy warm-up phase, we set the training batch size to 128. Following prior work [46], we use a learning rate of $1\\mathrm{e}^{-5}$ for Qwen2-VL-2B and $5\\mathrm{e}^{-6}$ for Qwen2-VL-7B, respectively.", + "bbox": [ + 89, + 733, + 482, + 809 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "For the step-wise online policy optimization phase, we perform 4 rollouts per question $(M = 4)$ and set the sampling temperature to 1.2 to encourage diverse reasoning paths. The maximum sequence length is set to $L = 1024$ , ensuring that the model can generate complete reasoning paths. Both the policy model and reference model are ini", + "bbox": [ + 89, + 810, + 483, + 901 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "tialized from the model after the warm-up, with the reference model frozen during RL training. The policy model's learning rate is $1\\mathrm{e}^{-6}$ , and we set the batch size to 4. We set the coefficient of match score $\\alpha$ to 0.1 to balance its effect. Following [39], the KL divergence coefficient $\\beta$ in Eq. 5 is set to 0.04 by default. All experiments are conducted on 4 H100-80GB GPUs.", + "bbox": [ + 511, + 566, + 906, + 672 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.3. Main Experimental Results", + "text_level": 1, + "bbox": [ + 511, + 681, + 761, + 698 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We conduct a comprehensive evaluation of R1-VL across eight widely used benchmarks, comparing it with various state-of-the-art MLLMs, as shown in Table 1.", + "bbox": [ + 511, + 704, + 906, + 748 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We first compare R1-VL with its baseline models, Qwen2-VL-2B and Qwen2-VL-7B. The baseline models exhibit limited reasoning capability, leading to very few reasoning paths receiving rewards, which negatively impacts the reasoning capability. In contrast, R1-VL with our proposed StepGRPO consistently improves the baseline models by significant margins, achieving $4.6\\%$ improvement over Qwen2-VL-2B and $3.8\\%$ over Qwen2-VL-7B. This improvement is largely attributed to that StepGRPO introduces step-wise reasoning accuracy and validity rewards,", + "bbox": [ + 509, + 750, + 908, + 902 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/30454a921be72743b7012a32c0b65488666b790c3263f8df6ab07a0349fea014.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
 | Number of generations M per question
Method | M=2 | M=3 | M=4 | M=5 | M=6
R1-VL-7B | 62.5 | 62.8 | 63.5 | 63.2 | 63.7
", + "bbox": [ + 94, + 88, + 480, + 154 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "which provide rich and informative supervision at each reasoning step, effectively mitigating the sparse reward issue for MLLMs.", + "bbox": [ + 89, + 219, + 482, + 263 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In addition, we compare R1-VL with existing state-of-the-art reasoning MLLMs. As shown in Table 1, R1-VL achieves better performance on most benchmarks, particularly in mathematical reasoning tasks. For example, R1-VL-7B surpasses Mulberry-7B and LlamaV-o1-11B by $0.6\\%$ and $9.3\\%$ respectively on the reasoning-intensive benchmark MathVista. Notably, R1-VL-2B even outperforms larger MLLMs. For instance, R1-VL-2B largely outperforms LLaVA-Reasoner-8B and LLaVA-CoT-11B by $13.1\\%$ and $9.3\\%$ on MathVista, respectively. This superior performance demonstrates that StepGRPO effectively enhances MLLMs' reasoning abilities by encouraging self-improvement via step-wise online reinforcement learning, rather than merely imitating positive reasoning paths.", + "bbox": [ + 88, + 265, + 482, + 476 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Additionally, we benchmark R1-VL against general MLLMs, including closed-source models such as GPT-4o and Claude-3.5 Sonnet, as well as open-source models like Cambrain-1-8B and DeepSeek-VL2-MOE-4.5B. We observe that R1-VL outperforms most open-source MLLMs and achieves competitive results against closed-source models. For example, R1-VL-7B achieves 63.7 accuracy on MathVista, closely matching GPT-4o's accuracy of 63.8. These results further validate StepGRPO's effectiveness in enhancing the reasoning capabilities of MLLMs.", + "bbox": [ + 88, + 477, + 482, + 628 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.4. Ablation Study", + "text_level": 1, + "bbox": [ + 89, + 637, + 243, + 652 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We conduct ablation studies for StepGRPO on Qwen2-VL-7B over MathVista benchmark for examining the effect of step-wise reasoning rewards including step-wise reasoning accuracy reward (StepRAR) and step-wise reasoning validity reward (StepRVR), as well as the role of the warm-up phase. As shown in Table 2, involving a warm-up stage improves baseline model to $61.2\\%$ , allowing the model to learn basic reasoning knowledge before reinforcement learning. In addition, including either StepRAR or StepRVR into the online reinforcement learning process outperforms the model with warm-up by large margins, demonstrating that both two types of step-wise rewards contribute to enhancing step-by-step reasoning capabilities. The best performance (i.e., $63.7\\%$ ) is achieved when both StepRAR and StepRVR are applied together. This shows that StepGRPO effectively improves complex", + "bbox": [ + 88, + 657, + 482, + 901 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/c4dcc9464dd8f5bae7a084695aadece73c6c64879dbae0c0fa3ed1632dd6f628.jpg", + "table_caption": [ + "Table 3. Parameter analysis of $M$ . The experiments are conducted on Qwen2-VL-7B over MathVista." + ], + "table_footnote": [], + "table_body": "
<table><tr><td>Method</td><td>MathVista</td></tr><tr><td>Warm-up</td><td>61.7</td></tr><tr><td>Warm-up + Outcome-level reward</td><td>62.3</td></tr><tr><td>Warm-up + Step-wise reward (Ours)</td><td>63.5</td></tr></table>
", + "bbox": [ + 517, + 88, + 903, + 162 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 4. Effectiveness of the step-wise reasoning rewards. The experiments are conducted on Qwen2-VL-7B over MathVista.", + "bbox": [ + 511, + 174, + 906, + 202 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/342463935f3c59dbe09ff74e62294e3541ac31f9b337d0a50fa17549e79c2968.jpg", + "image_caption": [ + "Figure 3. Comparison between StepGRPO and SFT. The experiments are conducted on Qwen2-VL-7B over MathVista." + ], + "image_footnote": [], + "bbox": [ + 537, + 219, + 857, + 392 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "reasoning tasks by reinforcing both the correctness of intermediate steps and the overall logical structure of the reasoning process.", + "bbox": [ + 511, + 460, + 905, + 506 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.5. Discussion", + "text_level": 1, + "bbox": [ + 511, + 516, + 632, + 531 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Parameter analysis. We conduct the parameter analysis on the number of generations $M$ in a group with Qwen2-VL7B over benchmark MathVista, analyzing its impact on reasoning performance. As described in Section 3, $M$ controls the number of generated reasoning trajectories per question during the RL phase. Table 3 shows that a larger $M$ generally leads to better performance. This is because, in group relative optimization, the baseline reward is estimated as the average reward of all generated reasoning paths. A larger $M$ results in a more stable and accurate baseline estimation, whereas a small $M$ may lead to high variance in baseline estimation, making the optimization process less reliable. However, increasing $M$ also introduces higher computational costs. Therefore, we set $M = 4$ as the default to balance performance and computational efficiency.", + "bbox": [ + 511, + 537, + 906, + 763 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Effectiveness of the step-wise reward. Our proposed stepwise reward mechanism plays a crucial role in mitigating the sparse reward issue by providing fine-grained supervision at each reasoning step. To further validate its effectiveness, we conduct an experiment comparing outcome-level reward against our step-wise reward. Specifically, we evaluate three settings: (1) Warm-up only; (2) Warm-up + Outcome-level Reward, where the model is optimized with outcome-level reward; and (3) Warm-up + Step-wise Re", + "bbox": [ + 511, + 765, + 908, + 902 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/e5f37c322318e22f5c792d8f69d382aba0a87edb79ed67e1acbc2363b6ff942e.jpg", + "image_caption": [ + "Question: Subtract all red things. \nSubtract all tiny matte balls. How many objects are left?", + "Correct answer: 5", + "Figure 4. Qualitative comparison." + ], + "image_footnote": [], + "bbox": [ + 96, + 155, + 259, + 244 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Qwen2-VL-7B:", + "bbox": [ + 279, + 93, + 349, + 103 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "First, identify the red objects in the image. There is 1 red object, which is a small red sphere. Incorrect reasoning", + "bbox": [ + 279, + 106, + 566, + 127 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Then, subtract the red object from the total number of objects. There are 7 objects in total and subtracting the 1 red object leaves 6 objects. Identify the tiny matte balls in the image. 
There are 2 tiny matte balls, which are the small green cylinder and the cyan sphere. Subtract the tiny matte balls from the remaining objects.", + "bbox": [ + 279, + 127, + 571, + 176 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "After subtracting the red object, there are 6 objects left. Subtracting the 2 tiny matte balls leaves 4 objects. Answer: 4", + "bbox": [ + 279, + 176, + 550, + 196 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Mulberry-7B:", + "text_level": 1, + "bbox": [ + 279, + 205, + 341, + 215 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Image Description: The image shows several 3D objects on a flat surface. There are two red objects: a small red sphere and a larger green cylinder. Other objects include a yellow cube, a green cylinder, a blue cylinder, a purple cylinder, and a teal sphere.", + "bbox": [ + 277, + 219, + 581, + 253 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Rationales: The question asks to subtract all red things and all tiny matte balls. We need to identify these objects in the image and count the remaining ones.", + "bbox": [ + 277, + 255, + 576, + 281 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Let's think step by step.", + "bbox": [ + 279, + 282, + 401, + 292 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Step 1: Identify and count all red objects. There is one red sphere. Incorrect reasoning Step 2: Identify and count all tiny matte balls. There is one teal sphere, which is not red, so it is not subtracted.", + "bbox": [ + 279, + 294, + 578, + 333 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Step 3: Subtract the red objects and tiny matte balls from the total number of objects: $7 - 1 = 6$", + "bbox": [ + 279, + 333, + 571, + 354 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The final answer is: 6", + "bbox": [ + 279, + 356, + 393, + 364 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "R1-VL-7B (Ours):", + "text_level": 1, + "bbox": [ + 602, + 95, + 679, + 106 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Image Description:", + "bbox": [ + 602, + 112, + 705, + 121 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The image shows a collection of 3D geometric objects on a flat surface. There are various colored shapes, including a red sphere, a yellow cube, a green cylinder, a blue cylinder, a purple cylinder, and a cyan sphere.", + "bbox": [ + 602, + 121, + 890, + 152 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Rationales:", + "bbox": [ + 602, + 160, + 671, + 169 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The question asks to subtract all red things and all tiny matte balls, then determine how many objects are left. This involves identifying and counting specific objects based on their color and size.", + "bbox": [ + 602, + 169, + 888, + 200 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Let's think step by step.", + "bbox": [ + 602, + 208, + 725, + 218 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Step 1:", + "bbox": [ + 602, + 224, + 653, + 232 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Identify and count all objects in the image. 
There are seven objects: a yellow cube, a green cylinder, a blue cylinder, a purple cylinder, a red sphere, and a cyan sphere.", + "bbox": [ + 602, + 232, + 885, + 258 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Step 2:", + "bbox": [ + 602, + 263, + 653, + 272 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Subtract all red things. There is one red sphere.", + "bbox": [ + 602, + 272, + 808, + 281 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/dc48c9c847fef2992a2cdf8778ddf43114550130a75e34b67ae5bbaad9c55bea.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 816, + 270, + 831, + 281 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Step 3:", + "bbox": [ + 602, + 287, + 653, + 296 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Subtract all tiny matte balls from the remaining objects. There is one cyan sphere, which is matte and small.", + "bbox": [ + 602, + 296, + 885, + 314 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Step 4:", + "bbox": [ + 602, + 319, + 653, + 328 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Count the remaining objects after removing the red sphere and the cyan sphere.", + "bbox": [ + 602, + 328, + 880, + 345 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The final answer is: 5", + "bbox": [ + 602, + 351, + 735, + 361 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Reward, where the model is optimized with our proposed step-wise reasoning reward. As shown in Table 4, both the outcome-level reward and our step-wise reward improve the warm-up model's performance, with our step-wise reward yielding the larger gain. This further demonstrates that step-wise rewards are more effective in enhancing MLLMs' reasoning capabilities, as they provide more fine-grained supervision and largely mitigate the sparse reward issue.", + "bbox": [ + 88, + 424, + 482, + 545 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Comparison to supervised fine-tuning (SFT). As discussed before, StepGRPO encourages the MLLM to self-improve its reasoning ability with step-wise reward signals rather than merely imitating successful reasoning paths. Here, we conduct experiments to further compare StepGRPO with SFT. Specifically, we start with the model after the warm-up and conduct the experiments with Qwen2-VL-7B over MathVista. As shown in Fig. 3, under the same number of training steps, StepGRPO consistently outperforms SFT, demonstrating the effectiveness of step-wise reinforcement learning. This is largely attributed to StepGRPO's ability to refine reasoning trajectories through self-exploration and reward-guided optimization, rather than solely relying on passive imitation of reasoning paths. By leveraging step-wise reasoning rewards, StepGRPO provides richer and more informative supervision, leading to better reasoning processes compared to SFT.", + "bbox": [ + 89, + 547, + 482, + 805 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Qualitative comparison. We provide a qualitative comparison of Qwen2-VL-7B, Mulberry-7B and our R1-VL-7B. As shown in Fig. 4, Qwen2-VL-7B generates relatively short responses, lacking a thorough reasoning process. While Mulberry-7B generates detailed reasoning paths, its intermediate steps contain errors, leading to incorrect final answers.", + "bbox": [ + 89, + 810, + 483, + 901 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "
In contrast, R1-VL-7B enables a more accurate step-by-step reasoning process.", + "bbox": [ + 511, + 424, + 903, + 454 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We provide further discussion, experimental results and qualitative analysis in the appendix.", + "bbox": [ + 511, + 454, + 903, + 484 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Conclusion", + "text_level": 1, + "bbox": [ + 511, + 497, + 633, + 513 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "This paper presents StepGRPO, a new online reinforcement learning framework that enables MLLMs to self-improve reasoning ability via a simple, effective and dense step-wise reward mechanism. Specifically, StepGRPO introduces two rule-based reasoning reward mechanisms, i.e., the Step-wise Reasoning Accuracy Reward, which rewards the intermediate reasoning steps based on a soft key-step matching technique, and the Step-wise Reasoning Validity Reward, which rewards the reasoning path's structure and logical consistency through a reasoning completeness and logic evaluation method. In this way, StepGRPO effectively mitigates the sparse reward issue for MLLMs without the need for process reward models and encourages a more structured and logically consistent reasoning process. With the proposed StepGRPO, we develop R1-VL, a series of MLLMs with superior reasoning capability. Extensive experiments over eight benchmarks demonstrate the superiority of the proposed StepGRPO compared with the state-of-the-art MLLMs.", + "bbox": [ + 511, + 523, + 906, + 808 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgement. This research is supported by the RIE2025 Industry Alignment Fund - Industry Collaboration Projects (IAF-ICP) (Award I2301E0026), administered by A\*STAR, as well as by Alibaba Group and NTU Singapore through the Alibaba-NTU Global e-Sustainability CorpLab (ANGEL).", + "bbox": [ + 511, + 810, + 905, + 900 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 91, + 90, + 187, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Anthropic. Claude 3.5 sonnet, 2024. 1, 2, 6", + "[2] Shuai Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Sibo Song, Kai Dang, Peng Wang, Shijie Wang, Jun Tang, et al. Qwen2.5-vl technical report. arXiv preprint arXiv:2502.13923, 2025. 6", + "[3] Yuntao Bai, Andy Jones, Kamal Ndousse, Amanda Askell, Anna Chen, Nova DasSarma, Dawn Drain, Stanislav Fort, Deep Ganguli, Tom Henighan, et al. Training a helpful and harmless assistant with reinforcement learning from human feedback. arXiv preprint arXiv:2204.05862, 2022. 3", + "[4] Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. Language models are few-shot learners. Advances in neural information processing systems, 33:1877-1901, 2020. 3", + "[5] Guoxin Chen, Minpeng Liao, Chengxi Li, and Kai Fan. Step-level value preference optimization for mathematical reasoning. arXiv preprint arXiv:2406.10858, 2024. 3", + "[6] Lin Chen, Jinsong Li, Xiaoyi Dong, Pan Zhang, Yuhang Zang, Zehui Chen, Haodong Duan, Jiaqi Wang, Yu Qiao, Dahua Lin, et al. Are we on the right way for evaluating large vision-language models? arXiv preprint arXiv:2403.20330, 2024. 5", + "[7] Zixiang Chen, Yihe Deng, Huizhuo Yuan, Kaixuan Ji, and Quanquan Gu. 
Self-play fine-tuning converts weak language models to strong language models. arXiv preprint arXiv:2401.01335, 2024. 3", + "[8] Zhe Chen, Weiyun Wang, Hao Tian, Shenglong Ye, Zhangwei Gao, Erfei Cui, Wenwen Tong, Kongzhi Hu, Jiapeng Luo, Zheng Ma, et al. How far are we to gpt-4v? closing the gap to commercial multimodal models with open-source suites. arXiv preprint arXiv:2404.16821, 2024. 1, 2, 6", + "[9] Zesen Cheng, Sicong Leng, Hang Zhang, Yifei Xin, Xin Li, Guanzheng Chen, Yongxin Zhu, Wenqi Zhang, Ziyang Luo, Deli Zhao, et al. Videollama 2: Advancing spatial-temporal modeling and audio understanding in video-llms. arXiv preprint arXiv:2406.07476, 2024. 2", + "[10] Yuhao Dong, Zuyan Liu, Hai-Long Sun, Jingkang Yang, Winston Hu, Yongming Rao, and Ziwei Liu. Insight-v: Exploring long-chain visual reasoning with multimodal large language models. arXiv preprint arXiv:2411.14432, 2024. 3, 6", + "[11] Chaoyou Fu, Peixian Chen, Yunhang Shen, Yulei Qin, Mengdan Zhang, Xu Lin, Jinrui Yang, Xiawu Zheng, Ke Li, Xing Sun, et al. Mme: A comprehensive evaluation benchmark for multimodal large language models. arXiv preprint arXiv:2306.13394, 2023. 5", + "[12] Tianrui Guan, Fuxiao Liu, Xiyang Wu, Ruiqi Xian, Zongxia Li, Xiaoyu Liu, Xijun Wang, Lichang Chen, Furong Huang, Yaser Yacoob, et al. Hallusionbench: An advanced diagnostic suite for entangled language hallucination and visual illusion in large vision-language models. arXiv preprint arXiv:2310.14566, 2023. 5", + "[13] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint" + ], + "bbox": [ + 93, + 114, + 482, + 888 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "arXiv:2501.12948, 2025. 1, 3", + "[14] Wenxuan Huang, Bohan Jia, Zijie Zhai, Shaosheng Cao, Zheyu Ye, Fei Zhao, Zhe Xu, Yao Hu, and Shaohui Lin. Vision-r1: Incentivizing reasoning capability in multimodal large language models. arXiv preprint arXiv:2503.06749, 2025. 3, 6", + "[15] Aaron Hurst, Adam Lerer, Adam P Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihinda, Alan Hayes, Alec Radford, et al. Gpt-4o system card. arXiv preprint arXiv:2410.21276, 2024. 1, 2, 6", + "[16] Leslie Pack Kaelbling, Michael L Littman, and Andrew W Moore. Reinforcement learning: A survey. Journal of artificial intelligence research, 4:237-285, 1996. 3", + "[17] Xiang Lan, Feng Wu, Kai He, Qinghao Zhao, Shenda Hong, and Mengling Feng. Gem: Empowering mllm for grounded ecg understanding with time series and images. arXiv preprint arXiv:2503.06073, 2025. 2", + "[18] Hugo Laurençon, Andrés Marafioti, Victor Sanh, and Léo Tronchon. Building and better understanding vision-language models: insights and future directions. In Workshop on Responsibly Building the Next Generation of Multimodal Foundational Models, 2024. 1, 2, 6", + "[19] Chunyuan Li, Cliff Wong, Sheng Zhang, Naoto Usuyama, Haotian Liu, Jianwei Yang, Tristan Naumann, Hoifung Poon, and Jianfeng Gao. Llava-med: Training a large language-and-vision assistant for biomedicine in one day. arXiv preprint arXiv:2306.00890, 2023. 2", + "[20] Haotian Liu, Chunyuan Li, Yuheng Li, Bo Li, Yuanhan Zhang, Sheng Shen, and Yong Jae Lee. Llava-next: Improved reasoning, OCR, and world knowledge, January 2024. 2", + "[21] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. 
Advances in neural information processing systems, 36, 2024. 1, 2", + "[22] Yuliang Liu, Biao Yang, Qiang Liu, Zhang Li, Zhiyin Ma, Shuo Zhang, and Xiang Bai. Textmonkey: An OCR-free large multimodal model for understanding document. arXiv preprint arXiv:2403.04473, 2024. 2", + "[23] Pan Lu, Hritik Bansal, Tony Xia, Jiacheng Liu, Chunyuan Li, Hannaneh Hajishirzi, Hao Cheng, Kai-Wei Chang, Michel Galley, and Jianfeng Gao. Mathvista: Evaluating mathematical reasoning of foundation models in visual contexts. arXiv preprint arXiv:2310.02255, 2023. 5", + "[24] Trung Quoc Luong, Xinbo Zhang, Zhanming Jie, Peng Sun, Xiaoran Jin, and Hang Li. Reft: Reasoning with reinforced fine-tuning. arXiv preprint arXiv:2401.08967, 2024. 3", + "[25] Chenyang Lyu, Minghao Wu, Longyue Wang, Xinting Huang, Bingshuai Liu, Zefeng Du, Shuming Shi, and Zhaopeng Tu. Macaw-llm: Multi-modal language modeling with image, audio, video, and text integration. arXiv preprint arXiv:2306.09093, 2023. 2", + "[26] Ahmed Masry, Do Xuan Long, Jia Qing Tan, Shafiq Joty, and Enamul Hoque. Chartqa: A benchmark for question answering about charts with visual and logical reasoning. arXiv preprint arXiv:2203.10244, 2022. 5", + "[27] Fanqing Meng, Lingxiao Du, Zongkai Liu, Zhixiang Zhou, Quanfeng Lu, Daocheng Fu, Tiancheng Han, Botian Shi, Wenhai Wang, Junjun He, et al. Mm-eureka: Exploring the frontiers of multimodal reasoning with rule-based reinforce" + ], + "bbox": [ + 516, + 92, + 903, + 891 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "ment learning. arXiv preprint arXiv:2503.07365, 2025. 3", + "[28] OpenAI. Gpt-4 technical report, 2023. 3", + "[29] OpenAI. Introducing openai o1, 2024. 2", + "[30] Yingzhe Peng, Gongrui Zhang, Miaosen Zhang, Zhiyuan You, Jie Liu, Qipeng Zhu, Kai Yang, Xingzhong Xu, Xin Geng, and Xu Yang. Lmm-r1: Empowering 3b lmms with strong reasoning abilities through two-stage rule-based rl. arXiv preprint arXiv:2503.07536, 2025. 3, 6", + "[31] Alec Radford, Karthik Narasimhan, Tim Salimans, Ilya Sutskever, et al. Improving language understanding by generative pre-training. 2018. 3", + "[32] Rafael Rafailov, Archit Sharma, Eric Mitchell, Christopher D Manning, Stefano Ermon, and Chelsea Finn. Direct preference optimization: Your language model is secretly a reward model. Advances in Neural Information Processing Systems, 36:53728-53741, 2023. 3", + "[33] John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov. Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347, 2017. 3", + "[34] Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300, 2024. 1, 3, 5", + "[35] Guangzhi Sun, Wenyi Yu, Changli Tang, Xianzhao Chen, Tian Tan, Wei Li, Lu Lu, Zejun Ma, Yuxuan Wang, and Chao Zhang. video-salmonn: Speech-enhanced audio-visual large language models. arXiv preprint arXiv:2406.15704, 2024. 2", + "[36] Kimi Team, Angang Du, Bofei Gao, Bowei Xing, Changjiu Jiang, Cheng Chen, Cheng Li, Chenjun Xiao, Chenzhuang Du, Chonghua Liao, et al. Kimi k1.5: Scaling reinforcement learning with llms. arXiv preprint arXiv:2501.12599, 2025. 1, 3", + "[37] Omkar Thawakar, Dinura Dissanayake, Ketan More, Ritesh Thawkar, Ahmed Heakl, Noor Ahsan, Yuhao Li, Mohammed Zumri, Jean Lahoud, Rao Muhammad Anwer, et al. Llamav-o1: Rethinking step-by-step visual reasoning in llms. 
arXiv preprint arXiv:2501.06186, 2025. 1, 3, 6", + "[38] Shengbang Tong, Ellis Brown, Penghao Wu, Sanghyun Woo, Manoj Middepogu, Sai Charitha Akula, Jihan Yang, Shusheng Yang, Adithya Iyer, Xichen Pan, et al. Cambrian-1: A fully open, vision-centric exploration of multimodal llms. arXiv preprint arXiv:2406.16860, 2024. 1, 2, 6", + "[39] Leandro von Werra, Younes Belkada, Lewis Tunstall, Edward Beeching, Tristan Thrush, Nathan Lambert, Shengyi Huang, Kashif Rasul, and Quentin Gallouédec. Trl: Transformer reinforcement learning. https://github.com/huggingface/trl, 2020. 6", + "[40] Ke Wang, Junting Pan, Weikang Shi, Zimu Lu, Houxing Ren, Aojun Zhou, Mingjie Zhan, and Hongsheng Li. Measuring multimodal mathematical reasoning with math-vision dataset. Advances in Neural Information Processing Systems, 37:95095-95169, 2025. 5", + "[41] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, et al. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191, 2024. 6", + "[42] Shengqiong Wu, Hao Fei, Leigang Qu, Wei Ji, and Tat-Seng" + ], + "bbox": [ + 91, + 90, + 482, + 890 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Chua. Next-gpt: Any-to-any multimodal llm. arXiv preprint arXiv:2309.05519, 2023. 2", + "[43] Zhiyu Wu, Xiaokang Chen, Zizheng Pan, Xingchao Liu, Wen Liu, Damai Dai, Huazuo Gao, Yiyang Ma, Chengyue Wu, Bingxuan Wang, et al. Deepseek-vl2: Mixture-of-experts vision-language models for advanced multimodal understanding. arXiv preprint arXiv:2412.10302, 2024. 1, 2, 6", + "[44] Guowei Xu, Peng Jin, Li Hao, Yibing Song, Lichao Sun, and Li Yuan. Llava-o1: Let vision language models reason step-by-step. arXiv preprint arXiv:2411.10440, 2024. 1, 3, 5, 6", + "[45] Huanjin Yao, Jiaxing Huang, Yawen Qiu, Michael K Chen, Wenzheng Liu, Wei Zhang, Wenjie Zeng, Xikun Zhang, Jingyi Zhang, Yuxin Song, et al. MMreason: An open-ended multi-modal multi-step reasoning benchmark for mllms toward agi. arXiv preprint arXiv:2506.23563, 2025. 5", + "[46] Huanjin Yao, Jiaxing Huang, Wenhao Wu, Jingyi Zhang, Yibo Wang, Shunyu Liu, Yingjie Wang, Yuxin Song, Haocheng Feng, Li Shen, et al. Mulberry: Empowering mllm with o1-like reasoning and reflection via collective monte carlo tree search. arXiv preprint arXiv:2412.18319, 2024. 1, 3, 5, 6", + "[47] Huanjin Yao, Qixiang Yin, Jingyi Zhang, Min Yang, Yibo Wang, Wenhao Wu, Fei Su, Li Shen, Minghui Qiu, Dacheng Tao, et al. R1-sharevl: Incentivizing reasoning capability of multimodal large language models via share-grpo. arXiv preprint arXiv:2505.16673, 2025. 3, 6", + "[48] Yuan Yao, Tianyu Yu, Ao Zhang, Chongyi Wang, Junbo Cui, Hongji Zhu, Tianchi Cai, Haoyu Li, Weilin Zhao, Zhihui He, et al. Minicpm-v: A gpt-4v level mllm on your phone. arXiv preprint arXiv:2408.01800, 2024. 6", + "[49] Jiabo Ye, Anwen Hu, Haiyang Xu, Qinghao Ye, Ming Yan, Yuhao Dan, Chenlin Zhao, Guohai Xu, Chenliang Li, Junfeng Tian, et al. mplug-docowl: Modularized multimodal large language model for document understanding. arXiv preprint arXiv:2307.02499, 2023. 2", + "[50] Dan Zhang, Sining Zhoubian, Ziniu Hu, Yisong Yue, Yuxiao Dong, and Jie Tang. Rest-mcts*: Llm self-training via process reward guided tree search. arXiv preprint arXiv:2406.03816, 2024. 3", + "[51] Haotian Zhang, Mingfei Gao, Zhe Gan, Philipp Dufter, Nina Wenzel, Forrest Huang, Dhruti Shah, Xianzhi Du, Bowen Zhang, Yanghao Li, et al. Mm1.
5: Methods, analysis & insights from multimodal llm fine-tuning. arXiv preprint arXiv:2409.20566, 2024. 1, 2, 6", + "[52] Jingyi Zhang, Jiaxing Huang, Sheng Jin, and Shijian Lu. Vision-language models for vision tasks: A survey. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2024. 2", + "[53] Jingyi Zhang, Jiaxing Huang, Xiaoqin Zhang, Ling Shao, and Shijian Lu. Historical test-time prompt tuning for vision foundation models. Advances in Neural Information Processing Systems, 37:12872-12896, 2024. 2", + "[54] Renrui Zhang, Dongzhi Jiang, Yichi Zhang, Haokun Lin, Ziyu Guo, Pengshuo Qiu, Aojun Zhou, Pan Lu, Kai-Wei Chang, Yu Qiao, et al. Mathverse: Does your multi-modal llm truly see the diagrams in visual math problems? In European Conference on Computer Vision, pages 169–186." + ], + "bbox": [ + 516, + 92, + 905, + 893 + ], + "page_idx": 9 + }, + { + "type": "ref_text", + "text": "Springer, 2024. 5", + "bbox": [ + 125, + 90, + 230, + 104 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[55] Ruohong Zhang, Bowen Zhang, Yanghao Li, Haotian Zhang, Zhiqing Sun, Zhe Gan, Yinfei Yang, Ruoming Pang, and Yiming Yang. Improve vision language model chain-of-thought reasoning. arXiv preprint arXiv:2410.16198, 2024. 1, 3, 6", + "[56] Xiaoman Zhang, Chaoyi Wu, Ziheng Zhao, Weixiong Lin, Ya Zhang, Yanfeng Wang, and Weidi Xie. Pmc-vqa: Visual instruction tuning for medical visual question answering. arXiv preprint arXiv:2305.10415, 2023. 2", + "[57] Chengke Zou, Xingang Guo, Rui Yang, Junyu Zhang, Bin Hu, and Huan Zhang. Dynamath: A dynamic visual benchmark for evaluating mathematical reasoning robustness of vision language models. arXiv preprint arXiv:2411.00836, 2024. 5" + ], + "bbox": [ + 93, + 106, + 480, + 294 + ], + "page_idx": 10 + } +] \ No newline at end of file diff --git a/data/2025/2503_12xxx/2503.12937/939affdd-0491-441c-956b-3cebb8540abd_model.json b/data/2025/2503_12xxx/2503.12937/939affdd-0491-441c-956b-3cebb8540abd_model.json new file mode 100644 index 0000000000000000000000000000000000000000..fe4fffd5d846a173d7c11b6214b2867708329552 --- /dev/null +++ b/data/2025/2503_12xxx/2503.12937/939affdd-0491-441c-956b-3cebb8540abd_model.json @@ -0,0 +1,2466 @@ +[ + [ + { + "type": "title", + "bbox": [ + 0.128, + 0.13, + 0.872, + 0.177 + ], + "angle": 0, + "content": "R1-VL: Learning to Reason with Multimodal Large Language Models via Step-wise Group Relative Policy Optimization" + }, + { + "type": "text", + "bbox": [ + 0.101, + 0.212, + 0.896, + 0.25 + ], + "angle": 0, + "content": "Jingyi Zhang Jiaxing Huang Huanjin Yao Shunyu Liu Xikun Zhang Shijian Lu Dacheng Tao Nanyang Technological University, Singapore" + }, + { + "type": "title", + "bbox": [ + 0.249, + 0.283, + 0.327, + 0.3 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.317, + 0.485, + 0.681 + ], + "angle": 0, + "content": "Recent studies generally enhance MLLMs' reasoning capabilities via supervised fine-tuning on high-quality chain-of-thought reasoning data, which often leads models to merely imitate successful reasoning paths without understanding what the wrong reasoning paths are. In this work, we aim to enhance the MLLMs' reasoning ability beyond passively imitating positive reasoning paths. To this end, we design Step-wise Group Relative Policy Optimization (StepGRPO), a new online reinforcement learning framework that enables MLLMs to self-improve reasoning ability via simple, effective and dense step-wise rewarding. 
Specifically, StepGRPO introduces two novel rule-based reasoning rewards: Step-wise Reasoning Accuracy Reward (StepRAR) and Step-wise Reasoning Validity Reward (StepRVR). StepRAR rewards the reasoning paths that contain necessary intermediate reasoning steps via a soft key-step matching technique, while StepRVR rewards reasoning paths that follow a well-structured and logically consistent reasoning process through a reasoning completeness and logic evaluation strategy. With the proposed StepGRPO, we introduce R1-VL, a series of MLLMs with outstanding capabilities in step-by-step reasoning. Extensive experiments over 8 benchmarks demonstrate the superiority of our methods. Code is available at link." + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.727, + 0.222, + 0.743 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.753, + 0.483, + 0.875 + ], + "angle": 0, + "content": "Multimodal large language models (MLLMs) have achieved significant progress in vision-language understanding [1, 8, 15, 18, 21, 38, 43, 51]. Recent efforts generally enhance MLLMs' reasoning capabilities by employing supervised fine-tuning (SFT) on high-quality chain-of-thought (CoT) reasoning data generated by powerful models (e.g., GPT-4) [37, 44, 46, 55]. For example, Mulberry [46] introduces CoMCTS, which utilizes multiple" + }, + { + "type": "image", + "bbox": [ + 0.541, + 0.284, + 0.885, + 0.453 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.512, + 0.466, + 0.908, + 0.619 + ], + "angle": 0, + "content": "Figure 1. For MLLMs, online reinforcement learning with outcome-level reward, like in Deepseek-R1's GRPO [34], often suffers from sparse reward issues, where only a few reasoning paths can receive positive/high rewards during training, ultimately leading to poor exploration efficiency and an unstable learning process. To tackle this, we propose a novel online reinforcement learning framework that incorporates step-wise reasoning rewards in addition to outcome-level rewards, encouraging MLLMs to iteratively refine their reasoning with dense rewards and resulting in a more stable training process and improved reasoning capability. The experiments are conducted on Qwen2-VL-7B over MathVista." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.654, + 0.907, + 0.777 + ], + "angle": 0, + "content": "models to collectively search and identify effective reasoning paths, followed by SFT on the collected reasoning data. However, SFT approaches focus solely on positive reasoning paths (i.e., those leading to correct answers), while the negative reasoning paths are largely neglected. This limitation may cause the model to merely imitate successful reasoning paths without understanding what the flawed and wrong reasoning paths are." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.78, + 0.909, + 0.903 + ], + "angle": 0, + "content": "In this work, we aim to enhance the MLLMs' reasoning ability beyond passively imitating positive reasoning paths. Recent advancements in NLP, such as Deepseek-R1 [13] and Kimi-K1.5 [36], have shown great potential in incentivizing the reasoning capability of LLMs via active self-exploration. 
The core design of these advances (e.g., GRPO in Deepseek-R1) lies in online reinforcement learning without the need for reward models, which encourages an LLM" + }, + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.285, + 0.061, + 0.712 + ], + "angle": 270, + "content": "arXiv:2503.12937v2 [cs.AI] 4 Aug 2025" + }, + { + "type": "page_footnote", + "bbox": [ + 0.114, + 0.887, + 0.458, + 0.902 + ], + "angle": 0, + "content": "Correspondence to: Jiaxing Huang {jiaxing.huang@ntu.edu.sg}." + } + ], + [ + { + "type": "text", + "bbox": [ + 0.09, + 0.092, + 0.482, + 0.182 + ], + "angle": 0, + "content": "to generate a group of reasoning paths and iteratively refine its reasoning process by rewarding the generated reasoning paths based on a rule-based reward function. Typically, an outcome-level reward strategy is used: reasoning paths leading to correct answers receive higher rewards, while those leading to incorrect answers receive lower ones." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.185, + 0.482, + 0.365 + ], + "angle": 0, + "content": "An intuitive idea is to directly apply these simple and effective LLM online reinforcement learning methods to MLLMs. However, relying solely on outcome-level rewards, like in Deepseek-R1's GRPO, often suffers from sparse reward issues in MLLM reasoning learning, resulting in suboptimal performance. Specifically, most MLLMs, especially smaller ones, exhibit very limited capability in long-chain reasoning accuracy and validity, so that only a few MLLM-generated reasoning paths can receive positive/high rewards. This lack of positive reward signals reduces exploration efficiency and leads to an unstable learning process, as illustrated in Fig. 1." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.368, + 0.482, + 0.534 + ], + "angle": 0, + "content": "We propose to tackle this sparse reward issue by introducing dense step-wise reasoning rewards in addition to sparse outcome-level rewards. To this end, we design Step-wise Group Relative Policy Optimization (StepGRPO), a new online reinforcement learning framework that enables MLLMs to self-improve reasoning ability via simple, effective and dense step-wise rewarding while using no additional process reward models. Specifically, StepGRPO introduces two novel rule-based reasoning reward mechanisms: Step-wise Reasoning Accuracy Reward (StepRAR) and Step-wise Reasoning Validity Reward (StepRVR)." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.536, + 0.483, + 0.808 + ], + "angle": 0, + "content": "StepRAR rewards the reasoning path using a soft key-step matching technique that evaluates whether the reasoning path contains key intermediate reasoning steps (i.e., the necessary steps to reach the correct final solution). StepRVR rewards the reasoning path based on a reasoning completeness and logic evaluation method, which assesses whether the reasoning process is well-structured and logically consistent. In this way, StepRAR and StepRVR help mitigate the sparse reward issue by providing informative rewards, even when the reasoning path does not produce the correct final answer – as long as it includes key intermediate reasoning steps or follows a structured and logical reasoning process. With StepRAR and StepRVR, StepGRPO takes the average step-wise reasoning rewards of a group of sampled reasoning paths as a baseline to estimate the advantage for policy optimization. Using the proposed StepGRPO, we develop R1-VL, a series of MLLMs with R1-like step-by-step reasoning capabilities."
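+ }, + { + "type": "text", + "bbox": null, + "angle": 0, + "content": "To make the contrast above concrete, the following is a minimal, self-contained Python sketch of sparse outcome-only rewarding versus dense step-wise rewarding. It is an illustrative toy, not the authors' implementation: the function names, the toy reasoning paths and the plain substring matching are assumptions; only the overall pattern (outcome-only rewards collapse to zero for most sampled paths, while a matched-key-step term in the spirit of Eq. 2 below yields a graded signal) follows the paper. \ndef outcome_reward(path, answer): \n    # GRPO-style rule: reward 1 only when the final answer is correct. \n    return 1.0 if path.strip().endswith(answer) else 0.0 \n \ndef stepwise_reward(path, answer, key_steps, alpha=0.1): \n    # Adds a StepRAR-like term: alpha times the ratio of matched key steps. \n    k = sum(step in path for step in key_steps) / len(key_steps) \n    return outcome_reward(path, answer) + alpha * k \n \npaths = ['12/2 = 6 and 6/2 = 3', '12/2 = 6, so the answer is 5', 'the answer is 7'] \nkey_steps, answer = ['12/2 = 6', '6/2 = 3'], '3' \nprint([outcome_reward(p, answer) for p in paths])              # [1.0, 0.0, 0.0] \nprint([stepwise_reward(p, answer, key_steps) for p in paths])  # [1.1, 0.05, 0.0]" + }, + { + "type": "text", + "bbox": null, + "angle": 0, + "content": "Under the outcome-only rule, two of the three sampled paths contribute no learning signal at all; the step-wise variant still separates the partially correct path from the entirely wrong one, which is exactly the densification that the following sections formalize."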
+ }, + { + "type": "text", + "bbox": [ + 0.09, + 0.811, + 0.482, + 0.901 + ], + "angle": 0, + "content": "The proposed StepGRPO offers two key advantages. 1) Effectiveness. StepGRPO introduces two step-wise reasoning reward mechanisms with group relative optimization, which provide rich and fine-grained step-wise reasoning rewards along the whole reasoning trajectory beyond the final answer. This mitigates the sparse reward issue and" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.092, + 0.905, + 0.198 + ], + "angle": 0, + "content": "encourages more structured, logically consistent reasoning trajectories. 2) Efficiency. StepGRPO achieves step-wise reasoning rewarding in a rule-based manner, which provides step-wise reasoning rewards while eliminating the need for process reward models. This significantly reduces computational overhead while maintaining fine-grained step-wise supervision." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.199, + 0.905, + 0.409 + ], + "angle": 0, + "content": "The main contributions of this work are fourfold. First, we propose StepGRPO, a new online reinforcement learning framework that enables MLLMs to self-improve reasoning ability via a simple, effective and dense step-wise rewarding. Second, we design two novel rule-based reasoning reward mechanisms, i.e., step-wise reasoning accuracy reward and step-wise reasoning validity reward, which effectively mitigate the sparse reward issue for MLLMs without the need for process reward models. Third, with the proposed StepGRPO, we develop R1-VL, a series of MLLMs that have superior reasoning capabilities. Fourth, extensive experiments over multiple benchmarks show that R1-VL achieves superior performance compared with state-of-the-art MLLMs." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.425, + 0.655, + 0.441 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.451, + 0.825, + 0.467 + ], + "angle": 0, + "content": "2.1. Multimodal Large Language Model" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.474, + 0.905, + 0.761 + ], + "angle": 0, + "content": "Multimodal Large Language Models (MLLMs) [1, 8, 15, 18, 21, 38, 43, 51, 52] have shown remarkable advancements across a wide range of vision-language understanding tasks, demonstrating their capabilities in comprehending and analyzing visual contents across various application domains. Early research on MLLMs primarily focuses on text generation based on text prompts and input modalities such as images [20, 21, 53] and videos [9, 35]. Recent advancements further enhance the capabilities of MLLMs from various aspects. For example, recent models [25, 42] incorporate multimodal inputs and outputs such as video, audio, and point clouds beyond text and images. In addition, some efforts attempt to adapt MLLMs for domain-specific tasks, such as medical image understanding [17, 19, 56] and document analysis [22, 49]. In this work, we focus on enhancing the reasoning ability of MLLMs in tackling complex reasoning tasks and introduce R1-VL, a series of MLLMs that have superior reasoning capability." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.772, + 0.693, + 0.789 + ], + "angle": 0, + "content": "2.2. 
MLLM Reasoning" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.795, + 0.905, + 0.903 + ], + "angle": 0, + "content": "Inspired by the advances in NLP that show great potential in learning to reason and tackling complex language tasks [29], recent studies attempt to enhance the reasoning capability of MLLMs. Generally, current MLLM reasoning methods improve the reasoning capability of MLLMs by generating high-quality chain-of-thought (CoT) data using a powerful model (e.g., GPT-4) and performing supervised" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.09, + 0.092, + 0.482, + 0.243 + ], + "angle": 0, + "content": "fine-tuning with the collected data [10, 37, 44, 46, 55]. For example, Mulberry [46] introduces Collective Monte Carlo Tree Search (MCTS) into MLLMs and proposes CoMCTS, which leverages complementary knowledge from multiple models to collaboratively search and identify effective reasoning paths. In addition, recent works [14, 27, 30, 47] attempt to explore online reinforcement learning to improve the MLLMs' reasoning ability. Different from these works, we design StepGRPO, which enables MLLMs to self-improve their reasoning ability with step-wise reward signals." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.252, + 0.317, + 0.268 + ], + "angle": 0, + "content": "2.3. Reinforcement Learning" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.273, + 0.483, + 0.455 + ], + "angle": 0, + "content": "Reinforcement Learning (RL) [16] is a fundamental approach in machine learning, where an agent learns to interact with an environment by taking actions, receiving rewards, and updating its policy to maximize the long-term return. With the rise of large language models (LLMs) [4, 28, 31], Reinforcement Learning with Human Feedback (RLHF) [3] has emerged as a key technique for fine-tuning models using human preference data. RLHF leverages algorithms like Proximal Policy Optimization (PPO) [33] and Direct Preference Optimization (DPO) [32] to guide model behavior for improving the alignment, coherence and helpfulness in response generation." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.455, + 0.483, + 0.803 + ], + "angle": 0, + "content": "Recently, RL is increasingly adopted to enhance LLMs' reasoning capabilities [5, 7, 13, 24, 36, 50], especially for mathematical problem solving. The core is to adopt an appropriate reward function or model that evaluates and reinforces high-quality reasoning paths while penalizing low-quality ones, guiding the model's optimization towards more structured and coherent reasoning trajectories using the RL algorithm. For example, ReST-MCTS* [50] trains a process reward model (PRM) for determining the correctness of each reasoning step within reasoning paths. Recent methods have found that using a simple outcome-level rule-based reward function (i.e., reasoning trajectories leading to the correct answer are rewarded with a higher score) can already provide an effective and reliable reward signal during the RL process [13, 24, 36]. For example, DeepSeek-R1 [13] demonstrates that group relative policy optimization (GRPO) [34] with outcome-level reward effectively enhances the reasoning capability of LLMs. In this work, we aim to improve the reasoning capability of MLLMs through reinforcement learning and propose StepGRPO, which effectively tackles the sparse reward issue in MLLMs, leading to a stable training process and better reasoning capability."
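+ }, + { + "type": "text", + "bbox": null, + "angle": 0, + "content": "As a minimal illustration of the outcome-level, group-relative rewarding described above, consider the Python sketch below. It is illustrative only: the helper name and the toy reward values are assumptions, and practical implementations apply the resulting advantages to token log-probabilities rather than printing them. \nimport statistics \n \ndef group_relative_advantages(rewards): \n    # Advantage of each sampled path relative to the group-mean baseline. \n    mu = statistics.mean(rewards) \n    sigma = statistics.pstdev(rewards) or 1.0  # guard against zero spread \n    return [(r - mu) / sigma for r in rewards] \n \n# Outcome-level rewards for M = 4 sampled paths; only one reached the answer. \nprint(group_relative_advantages([1.0, 0.0, 0.0, 0.0])) \n# -> approximately [1.73, -0.58, -0.58, -0.58]" + }, + { + "type": "text", + "bbox": null, + "angle": 0, + "content": "Because the baseline is simply the group mean, no learned value or reward model is required; this is the property StepGRPO retains while replacing the purely outcome-level rewards with step-wise ones."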
+ }, + { + "type": "title", + "bbox": [ + 0.091, + 0.815, + 0.182, + 0.831 + ], + "angle": 0, + "content": "3. Method" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.84, + 0.483, + 0.902 + ], + "angle": 0, + "content": "This section first presents the task formulation, and then introduces the proposed Step-wise Group Relative Policy Optimization (StepGRPO). More details are elaborated in the ensuing subsections." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.091, + 0.688, + 0.105 + ], + "angle": 0, + "content": "3.1. Task Formulation" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.113, + 0.905, + 0.295 + ], + "angle": 0, + "content": "In this paper, we consider a pre-trained MLLM and denote it as a policy model \\(\\pi_{\\theta}\\). Given a multimodal question \\(Q\\) consisting of an image and a textual task instruction, i.e., \\(Q = \\{\\text{text}, \\text{image}\\}\\), the policy model \\(\\pi_{\\theta}\\) generates a response \\(\\mathbf{c}\\) with a step-by-step reasoning trajectory. Generally, this process can be formulated as a sequence of next-token prediction actions, i.e., \\(\\mathbf{c} = (a_1, a_2, \\dots, a_t, \\dots, a_T)\\), where each action \\(a_t\\) is sampled from the policy model \\(\\pi_{\\theta}\\) and \\(T\\) represents the maximum sequence length. After each action, the new state \\(s_{t+1}\\) is determined by updating the current state \\(s_t\\) with the newly generated action \\(a_t\\), i.e., \\(s_{t+1} = (s_t, a_t)\\), \\(1 \\leq t \\leq T\\)." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.295, + 0.906, + 0.446 + ], + "angle": 0, + "content": "Considering this formulation, the objective of our task is to optimize the policy model \\(\\pi_{\\theta}\\) such that it can select better actions based on the previous states, thereby improving reasoning quality. In the context of reinforcement learning (RL), the policy model is generally optimized by maximizing the cumulative reward, where the reward for taking action \\(a_{t}\\) at state \\(s_t\\) is denoted as \\(r(s_t,a_t,s_{t + 1})\\). Following prior studies [46], we define an action in this paper as generating a reasoning step, which consists of one or more sentences containing multiple word tokens." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.454, + 0.902, + 0.47 + ], + "angle": 0, + "content": "3.2. Step-wise Group Relative Policy Optimization" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.476, + 0.906, + 0.612 + ], + "angle": 0, + "content": "We propose Step-wise Group Relative Policy Optimization (StepGRPO), a novel online reinforcement fine-tuning framework that mitigates the sparse reward issue for MLLMs and encourages self-improvement in reasoning ability through simple, effective and dense step-wise reward mechanisms. As illustrated in Fig. 2, StepGRPO consists of two phases: (1) a policy warm-up phase and (2) a step-wise online policy optimization phase. The overall algorithm is shown in Algorithm 1." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.62, + 0.673, + 0.635 + ], + "angle": 0, + "content": "3.2.1. Policy Warm-up" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.639, + 0.905, + 0.761 + ], + "angle": 0, + "content": "This phase equips the policy model with fundamental reasoning capabilities, ensuring it can generate proper step-wise reasoning paths before reinforcement learning. 
During the warm-up phase, the policy model is fine-tuned using a multimodal dataset \\(D_{s}\\) with Chain-of-Thought (CoT) reasoning paths, where each sample consists of a multimodal question \\(Q\\) and a step-by-step reasoning path \\(\\tau\\), i.e., \\(D_{s} = \\{Q^{n}, \\tau^{n}\\}_{n=1}^{N}\\):" + }, + { + "type": "equation", + "bbox": [ + 0.563, + 0.77, + 0.905, + 0.813 + ], + "angle": 0, + "content": "\\[\n\\mathcal{L}_{\\text{warm-up}} = - \\mathbb{E}_{\\tau \\sim D_{s}} \\left[ \\sum_{t = 1}^{T} \\log (\\pi_{\\theta}(a_{t} | s_{t})) \\right]. \\tag{1}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.822, + 0.82, + 0.837 + ], + "angle": 0, + "content": "3.2.2. Step-wise Online Policy Optimization" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.84, + 0.905, + 0.902 + ], + "angle": 0, + "content": "This phase enables MLLMs to self-improve their reasoning ability via online reinforcement learning, mitigating the sparse reward issue through step-wise reasoning rewards. As illustrated in Fig. 2, for each question \\( Q \\in D_{s} \\)," + } + ], + [ + { + "type": "image", + "bbox": [ + 0.101, + 0.089, + 0.207, + 0.147 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.213, + 0.096, + 0.396, + 0.145 + ], + "angle": 0, + "content": "Question: In the given diagram, triangle ABC has AD as its median and point E is the midpoint of AD. If the area of triangle ABC is 12, what is the area of triangle ABE?" + }, + { + "type": "image_caption", + "bbox": [ + 0.424, + 0.095, + 0.887, + 0.145 + ], + "angle": 0, + "content": "Answer: Step 1: Since AD is a median, it divides triangle ABC into two equal areas: ABD and ACD. Step 2: Segment AE is half of AD, splitting triangle ABD into two triangles of equal area: ABE and BED. Step 3: The area of triangle ABD is half of triangle ABC, which is \\(\\frac{12}{2} = 6\\). Step 4: Since E is the midpoint of AD, triangle ABE is half of triangle ABD. Therefore, the area of triangle ABE is \\(\\frac{6}{2} = 3\\). The final answer is 3." + }, + { + "type": "image", + "bbox": [ + 0.101, + 0.154, + 0.895, + 0.309 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.101, + 0.314, + 0.327, + 0.326 + ], + "angle": 0, + "content": "(a) Step-wise Reasoning Accuracy Reward" + }, + { + "type": "title", + "bbox": [ + 0.107, + 0.332, + 0.26, + 0.355 + ], + "angle": 0, + "content": "Pre-extracted key steps with Augmentations:" + }, + { + "type": "text", + "bbox": [ + 0.106, + 0.358, + 0.26, + 0.367 + ], + "angle": 0, + "content": "1. AD is a median; median is \\(AD\\)" + }, + { + "type": "text", + "bbox": [ + 0.106, + 0.369, + 0.184, + 0.378 + ], + "angle": 0, + "content": "2. equal area; ..." + }, + { + "type": "text", + "bbox": [ + 0.106, + 0.379, + 0.256, + 0.389 + ], + "angle": 0, + "content": "3. AE is half of AD; \\(AE = (1/2)AD\\)" + }, + { + "type": "text", + "bbox": [ + 0.106, + 0.39, + 0.256, + 0.4 + ], + "angle": 0, + "content": "4. \\(\\frac{12}{2} = 6\\); \\(12/2 = 6\\), ..." + }, + { + "type": "text", + "bbox": [ + 0.106, + 0.401, + 0.211, + 0.41 + ], + "angle": 0, + "content": "5. E is the midpoint; ..." + }, + { + "type": "text", + "bbox": [ + 0.106, + 0.411, + 0.242, + 0.421 + ], + "angle": 0, + "content": "6. \\(\\frac{6}{2} = 3\\); \\(6/2 = 3\\), ..."
+ }, + { + "type": "list", + "bbox": [ + 0.106, + 0.358, + 0.26, + 0.421 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.276, + 0.332, + 0.406, + 0.343 + ], + "angle": 0, + "content": "Soft key-step matching:" + }, + { + "type": "text", + "bbox": [ + 0.275, + 0.343, + 0.52, + 0.424 + ], + "angle": 0, + "content": "Description: The image shows ...; #Rationale: The question asks for the area...; #Step1: ... we find AD is a median of ...; #Step2: ... AE splits triangle ABD ...; #Step3: ... The area of triangle ABD is \\( 12/2 = 6 \\), ..., and the area of triangle ABE is \\( 6/2 = 3 \\). #The final answer is: 3. Step-wise Matching score: 3/6" + }, + { + "type": "title", + "bbox": [ + 0.529, + 0.315, + 0.747, + 0.326 + ], + "angle": 0, + "content": "(b) Step-wise Reasoning Validity Reward" + }, + { + "type": "text", + "bbox": [ + 0.533, + 0.331, + 0.887, + 0.344 + ], + "angle": 0, + "content": "Description \\(\\rightarrow\\) #Rationale \\(\\rightarrow\\) #Step1 \\(\\rightarrow\\) ... \\(\\rightarrow\\) #Step\\(N\\) \\(\\rightarrow\\) #Answer." + }, + { + "type": "text", + "bbox": [ + 0.534, + 0.346, + 0.669, + 0.357 + ], + "angle": 0, + "content": "i. Reasoning completeness" + }, + { + "type": "text", + "bbox": [ + 0.534, + 0.358, + 0.878, + 0.37 + ], + "angle": 0, + "content": "Description \\(\\rightarrow\\) #Rationale \\(\\rightarrow\\) #Answer. Missing reasoning steps" + }, + { + "type": "text", + "bbox": [ + 0.534, + 0.371, + 0.896, + 0.384 + ], + "angle": 0, + "content": "Description \\(\\rightarrow\\) #Step1 \\(\\rightarrow\\) ... \\(\\rightarrow\\) #Step\\(N\\) \\(\\rightarrow\\) #Answer. Missing rationale" + }, + { + "type": "text", + "bbox": [ + 0.534, + 0.387, + 0.63, + 0.398 + ], + "angle": 0, + "content": "ii. Reasoning logic" + }, + { + "type": "text", + "bbox": [ + 0.534, + 0.398, + 0.887, + 0.424 + ], + "angle": 0, + "content": "Description \\(\\rightarrow\\) #Rationale \\(\\rightarrow\\) #Answer \\(\\rightarrow\\) #Step1... \\(\\rightarrow\\) #StepN. X \n#Description \\(\\rightarrow\\) #Step3 \\(\\rightarrow\\) #Rationale \\(\\rightarrow\\) ... \\(\\rightarrow\\) #Step1 \\(\\rightarrow\\) #Answer X" + }, + { + "type": "image_caption", + "bbox": [ + 0.089, + 0.441, + 0.907, + 0.54 + ], + "angle": 0, + "content": "Figure 2. Overview of the proposed StepGRPO. StepGRPO consists of two phases: a policy warm-up phase and a step-wise online policy optimization phase. After the warm-up, the policy model \\(\\pi_{\\theta}\\) generates a group of reasoning paths \\(\\{\\mathbf{c}^i\\}_{i=1}^M\\) and assigns step-wise rewards using two proposed mechanisms: Step-wise Reasoning Accuracy Reward (StepRAR) and Step-wise Reasoning Validity Reward (StepRVR). StepRAR rewards reasoning paths that contain key intermediate steps, identified using a soft key-step matching technique. StepRVR rewards reasoning paths based on completeness and logical consistency, ensuring they are well-structured. StepGRPO then estimates the advantage \\(\\hat{A}\\) for policy optimization by using the average step-wise reasoning reward of a group of sampled reasoning paths as a baseline. Examples for StepRAR and StepRVR are illustrated in (a) and (b), respectively."
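+ }, + { + "type": "text", + "bbox": null, + "angle": 0, + "content": "The soft key-step matching illustrated in (a) can be sketched in a few lines of Python. This is a simplified assumption of how such matching could work, not the paper's exact implementation: the paper obtains key steps and their augmented variants by prompting GPT-4, whereas the variant lists and whitespace normalization below are invented for illustration. \ndef normalize(s): \n    # Lowercase and drop whitespace so '6/2 = 3' and '6/2=3' compare equal. \n    return ''.join(s.lower().split()) \n \ndef match_score(path, key_steps): \n    # k in Eq. 2: ratio of key steps matched by any of their augmented variants. \n    p = normalize(path) \n    hits = sum(any(normalize(v) in p for v in variants) for variants in key_steps) \n    return hits / len(key_steps) \n \nkey_steps = [ \n    ['AD is a median', 'median is AD'], \n    ['12/2 = 6', '12 divided by 2 equals 6'], \n    ['6/2 = 3', '6 divided by 2 equals 3'], \n] \npath = '#Step1: AD is a median ... #Step3: 12 / 2 = 6, so the answer is 3.' \nprint(match_score(path, key_steps))  # 2/3: the 6/2 = 3 key step is unmatched"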
+ }, + { + "type": "text", + "bbox": [ + 0.089, + 0.566, + 0.483, + 0.702 + ], + "angle": 0, + "content": "the policy model \\(\\pi_{\\theta}\\) first generates a group of \\(M\\) reasoning trajectories via multiple rollouts, i.e., \\(\\{\\mathbf{c}^i\\}_{i=1}^M\\), where \\(\\mathbf{c}^i = (a_1^i, a_2^i, \\ldots, a_t^i, \\ldots, a_T^i)\\). After obtaining a group of \\(M\\) reasoning trajectories, we employ our proposed step-wise reasoning rewards to evaluate and reward each generated reasoning trajectory. Specifically, we introduce two types of rule-based step-wise rewards, i.e., the step-wise reasoning accuracy reward (StepRAR) and the step-wise reasoning validity reward (StepRVR)." + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.705, + 0.483, + 0.901 + ], + "angle": 0, + "content": "Step-wise reasoning accuracy reward (StepRAR) reduces the effect of learning from sparse rewards by additionally rewarding reasoning paths that contain correct intermediate reasoning steps contributing to the final solution. Specifically, for each question \\( Q \\), we pre-extract a set of key reasoning steps \\( \\mathbf{v} = \\{v_{1}, v_{2}, \\ldots\\} \\) from the corresponding reasoning path \\( \\tau \\) in dataset \\( D_{s} \\). We define key steps as the essential variables and equations that directly contribute to the final solution, and prompt GPT-4 to extract several key steps from the reasoning path for each question. To ensure efficient reward assignment, we refine the extracted steps by removing redundant content and retaining only the core few words necessary for reasoning. Furthermore, we" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.566, + 0.907, + 0.656 + ], + "angle": 0, + "content": "augment each extracted key step into multiple equivalent formats to allow more flexible and accurate matching, preventing missed matches due to math-related formatting differences. For example, a mathematical expression such as \"\\(\\frac{6}{3} = 2\\)\" is augmented to \"6/3 = 2\" or \"6 divided by 3 equals 2\"." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.656, + 0.907, + 0.732 + ], + "angle": 0, + "content": "With the extracted key reasoning steps \\(\\mathbf{v} = \\{v_{1}, v_{2}, \\ldots\\}\\) and this soft matching mechanism, we calculate a match score for each generated reasoning path based on the ratio of matched key steps, i.e., \\(k^{i} = |\\mathbf{v}_{\\text{match}}| / |\\mathbf{v}|\\). Then, StepRAR for \\(1 \\leq t \\leq T\\) is defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.518, + 0.741, + 0.905, + 0.81 + ], + "angle": 0, + "content": "\\[\nr_{acc}^{i}\\left(s_{t}, a_{t}, s_{t + 1}\\right) = \\left\\{ \\begin{array}{ll} 1 + \\alpha k^{i}, & \\operatorname{ans}\\left(s_{t + 1}\\right) = y, \\\\ \\alpha k^{i}, & \\operatorname{ans}\\left(s_{t + 1}\\right) \\neq \\text{null}, \\neq y, \\\\ 0, & \\operatorname{ans}\\left(s_{t + 1}\\right) = \\text{null}, \\end{array} \\right. \\tag{2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.811, + 0.905, + 0.841 + ], + "angle": 0, + "content": "where \\( y \\) is the ground-truth answer extracted from the CoT reasoning path."
+ }, + { + "type": "text", + "bbox": [ + 0.512, + 0.841, + 0.906, + 0.902 + ], + "angle": 0, + "content": "By leveraging pre-extracted key reasoning steps, StepRAR efficiently provides additional supervision with a simple soft matching mechanism, ensuring the model learns meaningful reasoning processes instead of guessing" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.091, + 0.092, + 0.218, + 0.106 + ], + "angle": 0, + "content": "answers randomly." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.107, + 0.484, + 0.243 + ], + "angle": 0, + "content": "Step-wise reasoning validity reward (StepRVR) aims to ensure that the generated paths adhere to a logically structured and coherent progression beyond reasoning accuracy. Prior studies [44, 46] have demonstrated that structured reasoning, such as problem decomposition and progressive reasoning, facilitates more accurate and interpretable reasoning processes, as it encourages models to break down complex problems into multiple intermediate steps rather than generating answers directly." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.245, + 0.484, + 0.441 + ], + "angle": 0, + "content": "Inspired by these findings, we incorporate step-wise reasoning validity to reinforce well-organized reasoning paths that follow an expected logical flow. Specifically, we define StepRVR using two key criteria: reasoning completeness \\(\\delta^c\\) and reasoning logic \\(\\delta^l\\). Reasoning completeness requires the response to include three essential components, i.e., a background analysis involving image description and rationale analysis to establish context, a step-by-step reasoning process, and a final answer. In addition to reasoning completeness, reasoning logic requires the reasoning path to follow a logical progression, where the background analysis must come before the solution steps and the final answer should only appear after the reasoning steps are complete." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.442, + 0.419, + 0.457 + ], + "angle": 0, + "content": "With these two criteria, we define StepRVR as" + }, + { + "type": "equation", + "bbox": [ + 0.097, + 0.47, + 0.484, + 0.522 + ], + "angle": 0, + "content": "\\[\nr_{val}^{i}\\left(s_{t}, a_{t}, s_{t + 1}\\right) = \\left\\{ \\begin{array}{ll} 1, & \\mathbb{I}\\left(\\delta^{c}\\left(s_{t + 1}\\right)\\right) \\cdot \\mathbb{I}\\left(\\delta^{l}\\left(s_{t + 1}\\right)\\right) = 1, \\\\ 0, & \\text{otherwise}, \\end{array} \\right. \\tag{3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.523, + 0.484, + 0.599 + ], + "angle": 0, + "content": "where the reasoning trajectory is rewarded only if it satisfies both completeness and logical coherence. By enforcing this, StepRVR helps the model produce structured, interpretable and logically sound reasoning trajectories, enhancing both the quality and reliability of generated responses." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.6, + 0.484, + 0.676 + ], + "angle": 0, + "content": "Optimization with the step-wise rewards. After obtaining the two types of step-wise rewards, we compute the overall reward for each reasoning path as \\( r^i = r_{acc}^i + r_{val}^i \\), and compute the rewards for all generated reasoning paths in the same way, i.e., \\( \\{r^1, r^2, \\dots, r^M\\} \\)."
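+ }, + { + "type": "text", + "bbox": null, + "angle": 0, + "content": "The two StepRVR criteria just defined reduce to simple string rules, as in the Python sketch below. The marker strings follow the format shown in Fig. 2(b); the exact markers and checks used by the authors are not specified here, so treat this as an assumption-laden sketch rather than the actual rule set. \ndef step_rvr(path): \n    # delta^c: every required section is present; delta^l: they appear in order. \n    markers = ['#Description', '#Rationale', '#Step1', '#Answer'] \n    pos = [path.find(m) for m in markers] \n    complete = all(p >= 0 for p in pos) \n    ordered = pos == sorted(pos) \n    return 1.0 if complete and ordered else 0.0 \n \ngood = '#Description ... #Rationale ... #Step1 ... #Answer: 3' \nbad = '#Description ... #Answer: 3 ... #Rationale ... #Step1' \nprint(step_rvr(good), step_rvr(bad))  # 1.0 0.0"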
+ }, + { + "type": "text", + "bbox": [ + 0.091, + 0.677, + 0.483, + 0.707 + ], + "angle": 0, + "content": "To estimate the advantage of each reasoning trajectory, we normalize its reward relative to the group as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.168, + 0.718, + 0.483, + 0.755 + ], + "angle": 0, + "content": "\\[\n\\hat{A}^{i} = \\frac{r^{i} - \\operatorname{mean}\\left(\\left\\{r^{1}, r^{2}, \\dots, r^{M}\\right\\}\\right)}{\\operatorname{std}\\left(\\left\\{r^{1}, r^{2}, \\dots, r^{M}\\right\\}\\right)}, \\tag{4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.768, + 0.484, + 0.83 + ], + "angle": 0, + "content": "where the mean group reward serves as the baseline, and \\(\\hat{A}^{i}\\) measures how much better or worse \\(r^{i}\\) is compared to the other reasoning trajectories within the group. Following this, we optimize the policy model with the loss defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.117, + 0.843, + 0.483, + 0.904 + ], + "angle": 0, + "content": "\\[\n\\mathcal{L}_{\\text{StepRL}} = - \\underset{Q \\in D_{s}}{\\mathbb{E}} \\left[ \\frac{1}{M} \\sum_{i=1}^{M} \\frac{\\pi_{\\theta}\\left(\\mathbf{c}^{i} \\mid Q\\right)}{\\left[ \\pi_{\\theta}\\left(\\mathbf{c}^{i} \\mid Q\\right) \\right]_{\\text{nograd}}} \\hat{A}^{i} - \\beta D_{KL}\\left(\\pi_{\\theta} \\| \\pi_{ref}\\right) \\right], \\tag{5}\n\\]" + }, + { + "type": "code_caption", + "bbox": [ + 0.516, + 0.091, + 0.905, + 0.107 + ], + "angle": 0, + "content": "Algorithm 1 Step-wise Group Relative Policy Optimization" + }, + { + "type": "algorithm", + "bbox": [ + 0.517, + 0.109, + 0.905, + 0.373 + ], + "angle": 0, + "content": "Input: Policy model \\(\\pi_{\\theta}\\) initialized by a pre-trained \nMLLM; a multimodal dataset \\(D_{s} = \\{Q^{n},\\tau^{n}\\}_{n = 1}^{N}\\) \nOutput: Trained policy model \\(\\pi_{\\theta}\\) \nPolicy warm-up: \nfor iter \\(= 1\\) to \\(N\\) do \n Sample \\(\\{Q,\\tau \\} \\in D_s\\) \n Optimize policy model \\(\\pi_{\\theta}\\) by Eq. 1 \nend for \nStep-wise online policy optimization: \nfor iter \\(= 1\\) to \\(N\\) do \n Sample \\(\\{Q,\\tau \\} \\in D_s\\) \n Generate a group of reasoning paths \\(\\{\\mathbf{c}^i\\}_{i = 1}^M \\sim \\pi_\\theta\\) \n Obtain step-wise rewards \\(\\{r^i\\}_{i = 1}^M\\) by Eqs. 2-3 \n Obtain relative advantages \\(\\{\\hat{A}^i\\}_{i = 1}^M\\) by Eq. 4 \n Optimize policy model \\(\\pi_{\\theta}\\) by Eqs. 5-6 \nend for \nreturn policy model \\(\\pi_{\\theta}\\)" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.398, + 0.907, + 0.49 + ], + "angle": 0, + "content": "where the KL divergence is adopted to regularize the policy model, preventing excessive deviation from the reference model. The reference model is typically initialized as the same model as the policy model but remains frozen during RL training. The KL divergence between the policy model and the reference model is estimated as in [34]:" + }, + { + "type": "equation", + "bbox": [ + 0.522, + 0.499, + 0.907, + 0.533 + ], + "angle": 0, + "content": "\\[\nD_{KL}\\left(\\pi_{\\theta} \\| \\pi_{ref}\\right) = \\frac{\\pi_{ref}\\left(\\mathbf{c}^{i} \\mid Q\\right)}{\\pi_{\\theta}\\left(\\mathbf{c}^{i} \\mid Q\\right)} - \\log \\frac{\\pi_{ref}\\left(\\mathbf{c}^{i} \\mid Q\\right)}{\\pi_{\\theta}\\left(\\mathbf{c}^{i} \\mid Q\\right)} - 1.
\\tag {6}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.542, + 0.639, + 0.559 + ], + "angle": 0, + "content": "4. Experiment" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.567, + 0.907, + 0.628 + ], + "angle": 0, + "content": "This section presents experiments including datasets and implementation details, main experimental results, ablation studies and discussion, respectively. More details are to be described in the ensuing subsections." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.637, + 0.616, + 0.651 + ], + "angle": 0, + "content": "4.1. Datasets" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.659, + 0.907, + 0.84 + ], + "angle": 0, + "content": "For policy warm-up, we adopt Mulberry-260k [46] for supervised fine-tuning. For step-wise online policy optimization, we randomly sample 10K data from Mulberry-260k as our training data. For evaluation, we adopt 8 widely-used multimodal benchmarks for comprehensively evaluating our proposed StepGRPO, including MathVista [23], MMStar [6], Math-Vision [40], ChartQA [26], DynaMath [57], HallusionBench [12], MathVerse [54], MME [11] and MM-Reason [45]. These multimodal benchmarks cover a wide range of tasks from mathematical reasoning, chart understanding, visual hallucination and general visual understanding." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.849, + 0.731, + 0.864 + ], + "angle": 0, + "content": "4.2. Implementation Details" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.871, + 0.907, + 0.901 + ], + "angle": 0, + "content": "Our proposed StepGRPO is generally applicable to different MLLMs. In our experiments, we adopt two state-of-the-art" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.098, + 0.089, + 0.912, + 0.487 + ], + "angle": 0, + "content": "
Method | MathVista | MMStar | Math-V | ChartQA | DynaMath | HallBench | MathVerse | MME (sum) | MMReason | AVG
Closed-Source Model
GPT-4o [15] | 63.8 | 63.9 | 30.3 | 85.7 | 63.7 | 55.0 | 39.4 | 2329 | 21.1 | 56.2
Claude-3.5 Sonnet [1] | 67.7 | 62.2 | - | 90.8 | 64.8 | 55.0 | - | 1920 | - | -
Open-Source Model
Cambrian-1-8B [38] | 49.0 | - | - | 73.3 | - | - | - | - | - | -
MM-1.5-7B [51] | 47.6 | - | - | 78.6 | - | - | - | 1861 | - | -
Idefics3-LLaMA3-8B [18] | 58.4 | 55.9 | - | 74.8 | - | - | - | 1937 | - | -
InternVL2-8B [8] | 58.3 | 61.5 | - | 83.3 | 39.7 | - | - | 2210 | - | -
MiniCPM-V-2.6-8B [48] | 60.6 | 57.5 | - | - | - | 48.1 | - | 2348 | - | -
DeepSeek-VL2-MOE-4.5B [43] | 62.8 | 61.3 | - | 86.0 | - | - | - | 2253 | 11.5 | -
Reasoning Model
LLaVA-CoT-11B [44] | 54.8 | 57.6 | - | - | - | 47.8 | - | - | - | -
LLaVA-Reasoner-8B [55] | 50.6 | 54.0 | - | 83.0 | - | - | - | - | - | -
Insight-V-8B [10] | 49.8 | 57.4 | - | 77.4 | - | - | - | 2069 | - | -
Mulberry-7B [46] | 63.1 | 61.3 | - | 83.9 | 45.1 | 54.1 | - | 2396 | 11.8 | -
LlamaV-o1-11B [37] | 54.4 | 59.4 | - | - | - | 63.5 | - | - | - | -
Vision-R1-7B [14] | 73.5 | - | - | - | - | - | 52.4 | - | - | -
LMM-R1 [30] | 63.2 | 58.0 | 26.3 | - | - | - | 41.5 | - | - | -
R1-ShareVL-7B [47] | 75.4 | 67.0 | 29.5 | - | - | - | 52.8 | - | - | -
Qwen2-VL-2B [41] | 43.0 | 48.0 | 12.4 | 73.5 | 24.9 | 41.7 | 19.7 | 1872 | 7.7 | 37.5
R1-VL-2B (Ours) | 52.1 | 49.8 | 17.1 | 75.2 | 29.4 | 44.0 | 26.2 | 2048 | 8.3 | 41.6
Qwen2-VL-7B [41] | 58.2 | 60.7 | 16.3 | 83.0 | 42.1 | 50.6 | 32.5 | 2327 | 11.9 | 48.7
R1-VL-7B (Ours) | 63.5 | 60.0 | 24.7 | 83.9 | 45.2 | 54.7 | 40.0 | 2376 | 12.5 | 52.1
Qwen2.5-VL-7B [2] | 68.2 | 63.9 | 25.1 | 87.3 | 53.2 | 52.1 | 49.2 | 2347 | 17.3 | 55.5
R1-VL-7B* (Ours) | 74.3 | 66.2 | 28.2 | 87.7 | 56.5 | 57.2 | 52.2 | 2395 | 17.9 | 58.4
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.498, + 0.908, + 0.542 + ], + "angle": 0, + "content": "Table 1. Main experimental results. To comprehensively examine the proposed StepGRPO, we conduct extensive experiments with two baseline models on eight benchmarks, and compare StepGRPO with various state-of-the-art MLLMs.* indicates that the model is trained using Qwen2.5-VL-7B as the base model with the data from [47]." + }, + { + "type": "table", + "bbox": [ + 0.102, + 0.564, + 0.48, + 0.682 + ], + "angle": 0, + "content": "
Warm-up | StepRAR | StepRVR | MathVista
- | - | - | 58.2
✓ | - | - | 61.2
✓ | ✓ | - | 62.4
✓ | - | ✓ | 61.9
✓ | ✓ | ✓ | 63.5
" + }, + { + "type": "table_caption", + "bbox": [ + 0.11, + 0.693, + 0.463, + 0.707 + ], + "angle": 0, + "content": "Table 2. Ablation study of StepGRPO over Qwen2-VL-7B." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.734, + 0.483, + 0.81 + ], + "angle": 0, + "content": "open-source MLLMs, i.e., Qwen2-VL-2B and Qwen2-VL-7B [41]. For the policy warm-up phase, we set the training batch size to 128. Following prior work [46], we use a learning rate of \\(1\\mathrm{e}^{-5}\\) for Qwen2-VL-2B and \\(5\\mathrm{e}^{-6}\\) for Qwen2-VL-7B, respectively." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.811, + 0.484, + 0.902 + ], + "angle": 0, + "content": "For the step-wise online policy optimization phase, we perform 4 rollouts per question \\((M = 4)\\) and set the sampling temperature to 1.2 to encourage diverse reasoning paths. The maximum sequence length is set to \\(L = 1024\\), ensuring that the model can generate complete reasoning paths. Both the policy model and reference model are ini" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.567, + 0.907, + 0.673 + ], + "angle": 0, + "content": "tialized from the model after the warm-up, with the reference model frozen during RL training. The policy model's learning rate is \\(1\\mathrm{e}^{-6}\\), and we set the batch size to 4. We set the coefficient of match score \\(\\alpha\\) to 0.1 to balance its effect. Following [39], the KL divergence coefficient \\(\\beta\\) in Eq. 5 is set to 0.04 by default. All experiments are conducted on 4 H100-80GB GPUs." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.682, + 0.763, + 0.699 + ], + "angle": 0, + "content": "4.3. Main Experimental Results" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.705, + 0.907, + 0.749 + ], + "angle": 0, + "content": "We conduct a comprehensive evaluation of R1-VL across eight widely used benchmarks, comparing it with various state-of-the-art MLLMs, as shown in Table 1." + }, + { + "type": "text", + "bbox": [ + 0.511, + 0.75, + 0.909, + 0.903 + ], + "angle": 0, + "content": "We first compare R1-VL with its baseline models, Qwen2-VL-2B and Qwen2-VL-7B. The baseline models exhibit limited reasoning capability, leading to very few reasoning paths receiving rewards, which negatively impacts the reasoning capability. In contrast, R1-VL with our proposed StepGRPO consistently improves the baseline models by significant margins, achieving \\(4.6\\%\\) improvement over Qwen2-VL-2B and \\(3.8\\%\\) over Qwen2-VL-7B. This improvement is largely attributed to that StepGRPO introduces step-wise reasoning accuracy and validity rewards," + } + ], + [ + { + "type": "table", + "bbox": [ + 0.095, + 0.089, + 0.481, + 0.155 + ], + "angle": 0, + "content": "
Number of generations M per question
Method | 2 | 3 | 4 | 5 | 6
R1-VL-7B | 62.5 | 62.8 | 63.5 | 63.2 | 63.7
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.166, + 0.483, + 0.194 + ], + "angle": 0, + "content": "Table 3. Parameter analysis of \\( M \\). The experiments are conducted on Qwen2-VL-7B over MathVista." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.22, + 0.483, + 0.265 + ], + "angle": 0, + "content": "which provide rich and informative supervision at each reasoning step, effectively mitigating the sparse reward issue for MLLMs." + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.266, + 0.483, + 0.477 + ], + "angle": 0, + "content": "In addition, we compare R1-VL with existing state-of-the-art reasoning MLLMs. As shown in Table 1, R1-VL achieves better performance on most benchmarks, particularly in mathematical reasoning tasks. For example, R1-VL-7B surpasses Mulberry-7B and LlamaV-o1-11B by \\(0.6\\%\\) and \\(9.3\\%\\) respectively on the reasoning-intensive benchmark MathVista. Notably, R1-VL-2B even outperforms larger MLLMs. For instance, R1-VL-2B largely outperforms LLaVA-Reasoner-8B and LLaVA-CoT-11B by \\(13.1\\%\\) and \\(9.3\\%\\) on MathVista, respectively. This superior performance demonstrates that StepGRPO effectively enhances MLLMs' reasoning abilities by encouraging self-improvement via step-wise online reinforcement learning, rather than merely imitating positive reasoning paths." + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.478, + 0.483, + 0.63 + ], + "angle": 0, + "content": "Additionally, we benchmark R1-VL against general MLLMs, including closed-source models such as GPT-4o and Claude-3.5 Sonnet, as well as open-source models like Cambrain-1-8B and DeepSeek-VL2-MOE-4.5B. We observe that R1-VL outperforms most open-source MLLMs and achieves competitive results against closed-source models. For example, R1-VL-7B achieves 63.7 accuracy on MathVista, closely matching GPT-4o's accuracy of 63.8. These results further validate StepGRPO's effectiveness in enhancing the reasoning capabilities of MLLMs." + }, + { + "type": "title", + "bbox": [ + 0.09, + 0.638, + 0.245, + 0.654 + ], + "angle": 0, + "content": "4.4. Ablation Study" + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.659, + 0.483, + 0.902 + ], + "angle": 0, + "content": "We conduct ablation studies for StepGRPO on Qwen2-VL-7B over MathVista benchmark for examining the effect of step-wise reasoning rewards including step-wise reasoning accuracy reward (StepRAR) and step-wise reasoning validity reward (StepRVR), as well as the role of the warm-up phase. As shown in Table 2, involving a warm-up stage improves baseline model to \\(61.2\\%\\), allowing the model to learn basic reasoning knowledge before reinforcement learning. In addition, including either StepRAR or StepRVR into the online reinforcement learning process outperforms the model with warm-up by large margins, demonstrating that both two types of step-wise rewards contribute to enhancing step-by-step reasoning capabilities. The best performance (i.e., \\(63.7\\%\\)) is achieved when both StepRAR and StepRVR are applied together. This shows that StepGRPO effectively improves complex" + }, + { + "type": "table", + "bbox": [ + 0.518, + 0.089, + 0.905, + 0.164 + ], + "angle": 0, + "content": "
Method | MathVista
Warm-up | 61.7
Warm-up + Outcome-level reward | 62.3
Warm-up + Step-wise reward (Ours) | 63.5
" + }, + { + "type": "table_caption", + "bbox": [ + 0.513, + 0.175, + 0.907, + 0.203 + ], + "angle": 0, + "content": "Table 4. Effectiveness of the step-wise reasoning rewards. The experiments are conducted on Qwen2-VL-7B over MathVista." + }, + { + "type": "image", + "bbox": [ + 0.539, + 0.22, + 0.859, + 0.393 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.513, + 0.407, + 0.907, + 0.435 + ], + "angle": 0, + "content": "Figure 3. Comparison between StepGRPO and SFT. The experiments are conducted on Qwen2-VL-7B over MathVista." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.462, + 0.906, + 0.507 + ], + "angle": 0, + "content": "reasoning tasks by reinforcing both the correctness of intermediate steps and the overall logical structure of the reasoning process." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.517, + 0.633, + 0.532 + ], + "angle": 0, + "content": "4.5. Discussion" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.539, + 0.907, + 0.765 + ], + "angle": 0, + "content": "Parameter analysis. We conduct the parameter analysis on the number of generations \\( M \\) in a group with Qwen2-VL7B over benchmark MathVista, analyzing its impact on reasoning performance. As described in Section 3, \\( M \\) controls the number of generated reasoning trajectories per question during the RL phase. Table 3 shows that a larger \\( M \\) generally leads to better performance. This is because, in group relative optimization, the baseline reward is estimated as the average reward of all generated reasoning paths. A larger \\( M \\) results in a more stable and accurate baseline estimation, whereas a small \\( M \\) may lead to high variance in baseline estimation, making the optimization process less reliable. However, increasing \\( M \\) also introduces higher computational costs. Therefore, we set \\( M = 4 \\) as the default to balance performance and computational efficiency." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.766, + 0.909, + 0.903 + ], + "angle": 0, + "content": "Effectiveness of the step-wise reward. Our proposed stepwise reward mechanism plays a crucial role in mitigating the sparse reward issue by providing fine-grained supervision at each reasoning step. To further validate its effectiveness, we conduct an experiment comparing outcome-level reward against our step-wise reward. Specifically, we evaluate three settings: (1) Warm-up only; (2) Warm-up + Outcome-level Reward, where the model is optimized with outcome-level reward; and (3) Warm-up + Step-wise Re" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.098, + 0.156, + 0.26, + 0.246 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.103, + 0.261, + 0.252, + 0.292 + ], + "angle": 0, + "content": "Question: Subtract all red things. \nSubtract all tiny matte balls. How many objects are left?" + }, + { + "type": "image_caption", + "bbox": [ + 0.104, + 0.301, + 0.186, + 0.311 + ], + "angle": 0, + "content": "Correct answer: 5" + }, + { + "type": "text", + "bbox": [ + 0.28, + 0.094, + 0.351, + 0.104 + ], + "angle": 0, + "content": "Qwen2-VL-7B:" + }, + { + "type": "text", + "bbox": [ + 0.28, + 0.107, + 0.568, + 0.128 + ], + "angle": 0, + "content": "First, identify the red objects in the image. There is 1 red object, which is a small red sphere. 
Incorrect reasoning" + }, + { + "type": "text", + "bbox": [ + 0.28, + 0.128, + 0.572, + 0.177 + ], + "angle": 0, + "content": "Then, subtract the red object from the total number of objects. There are 7 objects in total and subtracting the 1 red object leaves 6 objects. Identify the tiny matte balls in the image. There are 2 tiny matte balls, which are the small green cylinder and the cyan sphere. Subtract the tiny matte balls from the remaining objects." + }, + { + "type": "text", + "bbox": [ + 0.281, + 0.178, + 0.552, + 0.197 + ], + "angle": 0, + "content": "After subtracting the red object, there are 6 objects left. Subtracting the 2 tiny matte balls leaves 4 objects. Answer: 4" + }, + { + "type": "title", + "bbox": [ + 0.28, + 0.206, + 0.343, + 0.217 + ], + "angle": 0, + "content": "Mulberry-7B:" + }, + { + "type": "text", + "bbox": [ + 0.279, + 0.22, + 0.582, + 0.254 + ], + "angle": 0, + "content": "Image Description: The image shows several 3D objects on a flat surface. There are two red objects: a small red sphere and a larger green cylinder. Other objects include a yellow cube, a green cylinder, a blue cylinder, a purple cylinder, and a teal sphere." + }, + { + "type": "text", + "bbox": [ + 0.279, + 0.256, + 0.578, + 0.282 + ], + "angle": 0, + "content": "Rationales: The question asks to subtract all red things and all tiny matte balls. We need to identify these objects in the image and count the remaining ones." + }, + { + "type": "text", + "bbox": [ + 0.28, + 0.284, + 0.403, + 0.294 + ], + "angle": 0, + "content": "Let's think step by step." + }, + { + "type": "text", + "bbox": [ + 0.28, + 0.295, + 0.58, + 0.334 + ], + "angle": 0, + "content": "Step 1: Identify and count all red objects. There is one red sphere. Incorrect reasoning Step 2: Identify and count all tiny matte balls. There is one teal sphere, which is not red, so it is not subtracted." + }, + { + "type": "text", + "bbox": [ + 0.28, + 0.334, + 0.572, + 0.355 + ], + "angle": 0, + "content": "Step 3: Subtract the red objects and tiny matte balls from the total number of objects: \\(7 - 1 = 6\\)" + }, + { + "type": "text", + "bbox": [ + 0.28, + 0.357, + 0.394, + 0.366 + ], + "angle": 0, + "content": "The final answer is: 6" + }, + { + "type": "title", + "bbox": [ + 0.603, + 0.097, + 0.68, + 0.107 + ], + "angle": 0, + "content": "R1-VL-7B (Ours):" + }, + { + "type": "text", + "bbox": [ + 0.603, + 0.113, + 0.707, + 0.122 + ], + "angle": 0, + "content": "Image Description:" + }, + { + "type": "text", + "bbox": [ + 0.603, + 0.122, + 0.892, + 0.154 + ], + "angle": 0, + "content": "The image shows a collection of 3D geometric objects on a flat surface. There are various colored shapes, including a red sphere, a yellow cube, a green cylinder, a blue cylinder, a purple cylinder, and a cyan sphere." + }, + { + "type": "text", + "bbox": [ + 0.603, + 0.161, + 0.673, + 0.17 + ], + "angle": 0, + "content": "Rationales:" + }, + { + "type": "text", + "bbox": [ + 0.603, + 0.17, + 0.89, + 0.202 + ], + "angle": 0, + "content": "The question asks to subtract all red things and all tiny matte balls, then determine how many objects are left. This involves identifying and counting specific objects based on their color and size." + }, + { + "type": "text", + "bbox": [ + 0.603, + 0.209, + 0.726, + 0.219 + ], + "angle": 0, + "content": "Let's think step by step." 
+ }, + { + "type": "text", + "bbox": [ + 0.603, + 0.225, + 0.655, + 0.233 + ], + "angle": 0, + "content": "Step 1:" + }, + { + "type": "text", + "bbox": [ + 0.603, + 0.233, + 0.886, + 0.259 + ], + "angle": 0, + "content": "Identify and count all objects in the image. There are seven objects: a yellow cube, a green cylinder, a blue cylinder, a purple cylinder, a red sphere, and a cyan sphere." + }, + { + "type": "text", + "bbox": [ + 0.603, + 0.264, + 0.655, + 0.273 + ], + "angle": 0, + "content": "Step 2:" + }, + { + "type": "text", + "bbox": [ + 0.603, + 0.273, + 0.81, + 0.282 + ], + "angle": 0, + "content": "Subtract all red things. There is one red sphere." + }, + { + "type": "image", + "bbox": [ + 0.818, + 0.271, + 0.832, + 0.282 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.603, + 0.288, + 0.655, + 0.297 + ], + "angle": 0, + "content": "Step 3:" + }, + { + "type": "text", + "bbox": [ + 0.603, + 0.297, + 0.886, + 0.315 + ], + "angle": 0, + "content": "Subtract all tiny matte balls from the remaining objects. There is one cyan sphere, which is matte and small." + }, + { + "type": "text", + "bbox": [ + 0.603, + 0.32, + 0.655, + 0.329 + ], + "angle": 0, + "content": "Step 4:" + }, + { + "type": "text", + "bbox": [ + 0.603, + 0.329, + 0.882, + 0.346 + ], + "angle": 0, + "content": "Count the remaining objects after removing the red sphere and the cyan sphere." + }, + { + "type": "text", + "bbox": [ + 0.603, + 0.352, + 0.736, + 0.362 + ], + "angle": 0, + "content": "The final answer is: 5" + }, + { + "type": "image_caption", + "bbox": [ + 0.397, + 0.384, + 0.6, + 0.399 + ], + "angle": 0, + "content": "Figure 4. Qualitative comparison." + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.425, + 0.483, + 0.546 + ], + "angle": 0, + "content": "ward, where the model is optimized with our proposed stepwise reasoning reward. As shown in Table 4, both outcome-level reward and our step-wise reward improve the warm-up model's performance, while our step-wise reward achieves better performance. This further demonstrates that stepwise rewards are more effective in enhancing MLLMs' reasoning capabilities, as they provide more fine-grained supervision and largely mitigate the sparse reward issue." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.549, + 0.483, + 0.806 + ], + "angle": 0, + "content": "Comparison to supervised fine-tuning (SFT). As discussed before, StepGRPO encourages MLLM to self-improve the reasoning ability with step-wise reward signals rather than merely imitating the successful reasoning paths. Here, we conduct experiments to further compare StepGRPO with SFT. Specifically, we start with the model after the warm-up and conduct the experiments with Qwen2-VL-7B over MathVista. As shown in Fig. 3, under the same number of training steps, StepGRPO consistently outperforms SFT, demonstrating the effectiveness of step-wise reinforcement learning. This is largely attributed to StepGRPO's ability to refine reasoning trajectories through self-exploration and reward-guided optimization, rather than solely relying on passive imitation of reasoning paths. By leveraging step-wise reasoning rewards, StepGRPO provides more rich and informative supervision, leading to better reasoning processes compared to SFT." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.811, + 0.484, + 0.902 + ], + "angle": 0, + "content": "Qualitative comparison. We provide qualitative comparison of Qwen2VL-7B, Mulberry-7B and our R1-VL-7B. As shown in Fig. 
4, Qwen2-VL-7B generates relatively short responses, lacking a thorough reasoning process. While Mulberry-7B generates detailed reasoning paths, its intermediate steps contain errors, leading to incorrect final an" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.425, + 0.905, + 0.455 + ], + "angle": 0, + "content": "svers. In contrast, R1-VL-7B enables more accurate step-by-step reasoning process." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.455, + 0.905, + 0.486 + ], + "angle": 0, + "content": "We provide more discussions, experimental results and qualitative analysis in the appendix." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.498, + 0.634, + 0.514 + ], + "angle": 0, + "content": "5. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.524, + 0.907, + 0.809 + ], + "angle": 0, + "content": "This paper presents StepGRPO, a new online reinforcement learning framework that enables MLLMs to self-improve reasoning ability via simple, effective and dense step-wise reward mechanism. Specifically, StepGRPO introduces two rule-based reasoning reward mechanisms, i.e., Step-wise Reasoning Accuracy Reward that rewards the intermediate reasoning steps based on a soft key-step matching technique and Step-wise Reasoning Validity Reward that rewards the reasoning path's reasoning structure and logical consistency though a reasoning completeness and logic evaluation method. In this way, StepGRPO enables to effectively mitigate the sparse reward issue for MLLMs without the need of process reward models and encourages more structured and logically consistent reasoning process. With the proposed StepGRPO, we develop R1-VL, a series of MLLMs with superior reasoning capability. Extensive experiments over eight benchmarks demonstrate the superiority of the proposed StepGRPO compared with the state-of-the-art MLLMs." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.811, + 0.906, + 0.901 + ], + "angle": 0, + "content": "Acknowledgement. This research is supported by the RIE2025 Industry Alignment Fund - Industry Collaboration Projects (IAF-ICP) (Award I2301E0026), administered by A\\*STAR, as well as supported by Alibaba Group and NTU Singapore through Alibaba-NTU Global e-Sustainability CorpLab (ANGEL)." + } + ], + [ + { + "type": "title", + "bbox": [ + 0.093, + 0.091, + 0.188, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.101, + 0.116, + 0.386, + 0.128 + ], + "angle": 0, + "content": "[1] Anthropic. Claude 3.5 sonnet, 2024. 1, 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.129, + 0.483, + 0.182 + ], + "angle": 0, + "content": "[2] Shuai Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Sibo Song, Kai Dang, Peng Wang, Shijie Wang, Jun Tang, et al. Qwen2. 5-vl technical report. arXiv preprint arXiv:2502.13923, 2025. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.183, + 0.482, + 0.25 + ], + "angle": 0, + "content": "[3] Yuntao Bai, Andy Jones, Kamal Ndousse, Amanda Askell, Anna Chen, Nova DasSarma, Dawn Drain, Stanislav Fort, Deep Ganguli, Tom Henighan, et al. Training a helpful and harmless assistant with reinforcement learning from human feedback. arXiv preprint arXiv:2204.05862, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.251, + 0.482, + 0.319 + ], + "angle": 0, + "content": "[4] Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. 
Language models are few-shot learners. Advances in neural information processing systems, 33:1877-1901, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.32, + 0.482, + 0.359 + ], + "angle": 0, + "content": "[5] Guoxin Chen, Minpeng Liao, Chengxi Li, and Kai Fan. Step-level value preference optimization for mathematical reasoning. arXiv preprint arXiv:2406.10858, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.36, + 0.482, + 0.425 + ], + "angle": 0, + "content": "[6] Lin Chen, Jinsong Li, Xiaoyi Dong, Pan Zhang, Yuhang Zang, Zehui Chen, Haodong Duan, Jiaqi Wang, Yu Qiao, Dahua Lin, et al. Are we on the right way for evaluating large vision-language models? arXiv preprint arXiv:2403.20330, 2024.5" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.427, + 0.482, + 0.48 + ], + "angle": 0, + "content": "[7] Zixiang Chen, Yihe Deng, Huizhuo Yuan, Kaixuan Ji, and Quanquan Gu. Self-play fine-tuning converts weak language models to strong language models. arXiv preprint arXiv:2401.01335, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.481, + 0.482, + 0.549 + ], + "angle": 0, + "content": "[8] Zhe Chen, Weiyun Wang, Hao Tian, Shenglong Ye, Zhangwei Gao, Erfei Cui, Wenwen Tong, Kongzhi Hu, Jiapeng Luo, Zheng Ma, et al. How far are we to gpt-4v? closing the gap to commercial multimodal models with open-source suites. arXiv preprint arXiv:2404.16821, 2024. 1, 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.549, + 0.482, + 0.617 + ], + "angle": 0, + "content": "[9] Zesen Cheng, Sicong Leng, Hang Zhang, Yifei Xin, Xin Li, Guanzheng Chen, Yongxin Zhu, Wenqi Zhang, Ziyang Luo, Deli Zhao, et al. Videollama 2: Advancing spatial-temporal modeling and audio understanding in video-llms. arXiv preprint arXiv:2406.07476, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.617, + 0.482, + 0.685 + ], + "angle": 0, + "content": "[10] Yuhao Dong, Zuyan Liu, Hai-Long Sun, Jingkang Yang, Winston Hu, Yongming Rao, and Ziwei Liu. Insight-v: Exploring long-chain visual reasoning with multimodal large language models. arXiv preprint arXiv:2411.14432, 2024. 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.685, + 0.482, + 0.753 + ], + "angle": 0, + "content": "[11] Chaoyou Fu, Peixian Chen, Yunhang Shen, Yulei Qin, Mengdan Zhang, Xu Lin, Jinrui Yang, Xiawu Zheng, Ke Li, Xing Sun, et al. Mme: A comprehensive evaluation benchmark for multimodal large language models. arXiv preprint arXiv:2306.13394, 2023. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.753, + 0.482, + 0.834 + ], + "angle": 0, + "content": "[12] Tianrui Guan, Fuxiao Liu, Xiyang Wu, Ruiqi Xian, Zongxia Li, Xiaoyu Liu, Xijun Wang, Lichang Chen, Furong Huang, Yaser Yacoob, et al. Hallusionbench: An advanced diagnostic suite for entangled language hallucination and visual illusion in large vision-language models. arXiv preprint arXiv:2310.14566, 2023. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.835, + 0.482, + 0.89 + ], + "angle": 0, + "content": "[13] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. 
arXiv preprint" + }, + { + "type": "list", + "bbox": [ + 0.094, + 0.116, + 0.483, + 0.89 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.548, + 0.093, + 0.726, + 0.106 + ], + "angle": 0, + "content": "arXiv:2501.12948,2025.1,3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.106, + 0.905, + 0.173 + ], + "angle": 0, + "content": "[14] Wenxuan Huang, Bohan Jia, Zijie Zhai, Shaosheng Cao, Zheyu Ye, Fei Zhao, Zhe Xu, Yao Hu, and Shaohui Lin. Vision-r1: Incentivizing reasoning capability in multimodal large language models. arXiv preprint arXiv:2503.06749, 2025. 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.175, + 0.905, + 0.228 + ], + "angle": 0, + "content": "[15] Aaron Hurst, Adam Lerer, Adam P Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihinda, Alan Hayes, Alec Radford, et al. Gpt-4o system card. arXiv preprint arXiv:2410.21276, 2024. 1, 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.229, + 0.905, + 0.268 + ], + "angle": 0, + "content": "[16] Leslie Pack Kaelbling, Michael L Littman, and Andrew W Moore. Reinforcement learning: A survey. Journal of artificial intelligence research, 4:237-285, 1996. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.269, + 0.905, + 0.322 + ], + "angle": 0, + "content": "[17] Xiang Lan, Feng Wu, Kai He, Qinghao Zhao, Shenda Hong, and Mengling Feng. Gem: Empowering mllm for grounded ecg understanding with time series and images. arXiv preprint arXiv:2503.06073, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.323, + 0.905, + 0.39 + ], + "angle": 0, + "content": "[18] Hugo Laurençon, Andrés Marafioti, Victor Sanh, and Léo Tronchon. Building and better understanding vision-language models: insights and future directions. In Workshop on Responsibly Building the Next Generation of Multimodal Foundational Models, 2024. 1, 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.391, + 0.905, + 0.459 + ], + "angle": 0, + "content": "[19] Chunyuan Li, Cliff Wong, Sheng Zhang, Naoto Usuyama, Haotian Liu, Jianwei Yang, Tristan Naumann, Hoifung Poon, and Jianfeng Gao. Llava-med: Training a large language-and-vision assistant for biomedicine in one day. arXiv preprint arXiv:2306.00890, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.459, + 0.905, + 0.512 + ], + "angle": 0, + "content": "[20] Haotian Liu, Chunyuan Li, Yuheng Li, Bo Li, Yuanhan Zhang, Sheng Shen, and Yong Jae Lee. Llava-last: Improved reasoning,OCR, and world knowledge, January 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.513, + 0.905, + 0.553 + ], + "angle": 0, + "content": "[21] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. Advances in neural information processing systems, 36, 2024. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.553, + 0.905, + 0.607 + ], + "angle": 0, + "content": "[22] Yuliang Liu, Biao Yang, Qiang Liu, Zhang Li, Zhiyin Ma, Shuo Zhang, and Xiang Bai. Textmonkey: AnOCR-free large multimodal model for understanding document. arXiv preprint arXiv:2403.04473, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.608, + 0.905, + 0.675 + ], + "angle": 0, + "content": "[23] Pan Lu, Hritik Bansal, Tony Xia, Jiacheng Liu, Chunyuan Li, Hannaneh Hajishirzi, Hao Cheng, Kai-Wei Chang, Michel Galley, and Jianfeng Gao. Mathvista: Evaluating mathematical reasoning of foundation models in visual contexts. arXiv preprint arXiv:2310.02255, 2023. 
5" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.675, + 0.905, + 0.715 + ], + "angle": 0, + "content": "[24] Trung Quoc Luong, Xinbo Zhang, Zhanming Jie, Peng Sun, Xiaoran Jin, and Hang Li. Reft: Reasoning with reinforced fine-tuning. arXiv preprint arXiv:2401.08967, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.715, + 0.905, + 0.782 + ], + "angle": 0, + "content": "[25] Chenyang Lyu, Minghao Wu, Longyue Wang, Xinting Huang, Bingshuai Liu, Zefeng Du, Shuming Shi, and Zhaopeng Tu. Macaw-llm: Multi-modal language modeling with image, audio, video, and text integration. arXiv preprint arXiv:2306.09093, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.783, + 0.905, + 0.837 + ], + "angle": 0, + "content": "[26] Ahmed Masry, Do Xuan Long, Jia Qing Tan, Shafiq Joty, and Enamul Hoque. Chartqa: A benchmark for question answering about charts with visual and logical reasoning. arXiv preprint arXiv:2203.10244, 2022. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.837, + 0.905, + 0.892 + ], + "angle": 0, + "content": "[27] Fanqing Meng, Lingxiao Du, Zongkai Liu, Zhixiang Zhou, Quanfeng Lu, Daocheng Fu, Tiancheng Han, Botian Shi, Wenhai Wang, Junjun He, et al. Mm-eureka: Exploring the frontiers of multimodal reasoning with rule-based reinforce" + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.093, + 0.905, + 0.892 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.126, + 0.092, + 0.466, + 0.106 + ], + "angle": 0, + "content": "ment learning. arXiv preprint arXiv:2503.07365, 2025. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.107, + 0.368, + 0.119 + ], + "angle": 0, + "content": "[28] OpenAI. Gpt-4 technical report, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.119, + 0.368, + 0.132 + ], + "angle": 0, + "content": "[29] OpenAI. Introducing openai o1, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.132, + 0.483, + 0.199 + ], + "angle": 0, + "content": "[30] Yingzhe Peng, Gongrui Zhang, Miaosen Zhang, Zhiyuan You, Jie Liu, Qipeng Zhu, Kai Yang, Xingzhong Xu, Xin Geng, and Xu Yang. Lmm-r1: Empowering 3b Imms with strong reasoning abilities through two-stage rule-based rl. arXiv preprint arXiv:2503.07536, 2025. 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.199, + 0.483, + 0.24 + ], + "angle": 0, + "content": "[31] Alec Radford, Karthik Narasimhan, Tim Salimans, Ilya Sutskever, et al. Improving language understanding by generative pre-training. 2018. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.24, + 0.483, + 0.307 + ], + "angle": 0, + "content": "[32] Rafael Rafailov, Archit Sharma, Eric Mitchell, Christopher D Manning, Stefano Ermon, and Chelsea Finn. Direct preference optimization: Your language model is secretly a reward model. Advances in Neural Information Processing Systems, 36:53728-53741, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.307, + 0.483, + 0.348 + ], + "angle": 0, + "content": "[33] John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov. Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347, 2017. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.348, + 0.483, + 0.415 + ], + "angle": 0, + "content": "[34] Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300, 2024. 
1, 3, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.415, + 0.483, + 0.47 + ], + "angle": 0, + "content": "[35] Guangzhi Sun, Wenyi Yu, Changli Tang, Xianzhao Chen, Tian Tan, Wei Li, Lu Lu, Zejun Ma, Yuxuan Wang, and Chao Zhang. video-salmonn: Speech-enhanced audio-visual large language models. arXiv preprint arXiv:2406.15704, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.47, + 0.483, + 0.536 + ], + "angle": 0, + "content": "[36] Kimi Team, Angang Du, Bofei Gao, Bowei Xing, Changjiu Jiang, Cheng Chen, Cheng Li, Chenjun Xiao, Chenzhuang Du, Chonghua Liao, et al. Kimi k1.5: Scaling reinforcement learning with llms. arXiv preprint arXiv:2501.12599, 2025. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.538, + 0.483, + 0.606 + ], + "angle": 0, + "content": "[37] Omkar Thawakar, Dinura Dissanayake, Ketan More, Ritesh Thawkar, Ahmed Heakl, Noor Ahsan, Yuhao Li, Mohammed Zumri, Jean Lahoud, Rao Muhammad Anwer, et al. Llamavol: Rethinking step-by-step visual reasoning in llms. arXiv preprint arXiv:2501.06186, 2025. 1, 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.606, + 0.483, + 0.673 + ], + "angle": 0, + "content": "[38] Shengbang Tong, Ellis Brown, Penghao Wu, Sanghyun Woo, Manoj Middepogu, Sai Charitha Akula, Jihan Yang, Shusheng Yang, Adithya Iyer, Xichen Pan, et al. Cambrian1: A fully open, vision-centric exploration of multimodal llms. arXiv preprint arXiv:2406.16860, 2024. 1, 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.673, + 0.483, + 0.741 + ], + "angle": 0, + "content": "[39] Leandro von Werra, Younes Belkada, Lewis Tunstall, Edward Beeching, Tristan Thrush, Nathan Lambert, Shengyi Huang, Kashif Rasul, and Quentin Gallouédec. Trl: Transformer reinforcement learning. https://github.com/huggingface/trl, 2020.6" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.741, + 0.483, + 0.809 + ], + "angle": 0, + "content": "[40] Ke Wang, Junting Pan, Weikang Shi, Zimu Lu, Houxing Ren, Aojun Zhou, Mingjie Zhan, and Hongsheng Li. Measuring multimodal mathematical reasoning with math-vision dataset. Advances in Neural Information Processing Systems, 37:95095-95169, 2025. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.809, + 0.483, + 0.877 + ], + "angle": 0, + "content": "[41] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, et al. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191, 2024. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.877, + 0.483, + 0.891 + ], + "angle": 0, + "content": "[42] Shengqiong Wu, Hao Fei, Leigang Qu, Wei Ji, and Tat-Seng" + }, + { + "type": "list", + "bbox": [ + 0.093, + 0.092, + 0.483, + 0.891 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.545, + 0.093, + 0.906, + 0.119 + ], + "angle": 0, + "content": "Chua. Next-gpt: Any-to-any multimodal lIm. arXiv preprint arXiv:2309.05519, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.12, + 0.906, + 0.201 + ], + "angle": 0, + "content": "[43] Zhiyu Wu, Xiaokang Chen, Zizheng Pan, Xingchao Liu, Wen Liu, Damai Dai, Huazuo Gao, Yiyang Ma, Chengyue Wu, Bingxuan Wang, et al. Deepseek-vl2: Mixture-of-experts vision-language models for advanced multimodal understanding. arXiv preprint arXiv:2412.10302, 2024. 
1, 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.201, + 0.906, + 0.255 + ], + "angle": 0, + "content": "[44] Guowei Xu, Peng Jin, Li Hao, Yibing Song, Lichao Sun, and Li Yuan. Llava-ol: Let vision language models reason step-by-step. arXiv preprint arXiv:2411.10440, 2024. 1, 3, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.255, + 0.906, + 0.324 + ], + "angle": 0, + "content": "[45] Huanjin Yao, Jiaxing Huang, Yawen Qiu, Michael K Chen, Wenzheng Liu, Wei Zhang, Wenjie Zeng, Xikun Zhang, Jingyi Zhang, Yuxin Song, et al. MMreason: An open-ended multi-modal multi-step reasoning benchmark for mllms toward agi. arXiv preprint arXiv:2506.23563, 2025. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.324, + 0.906, + 0.405 + ], + "angle": 0, + "content": "[46] Huanjin Yao, Jiaxing Huang, Wenhao Wu, Jingyi Zhang, Yibo Wang, Shunyu Liu, Yingjie Wang, Yuxin Song, Haocheng Feng, Li Shen, et al. Mulberry: Empowering mllm with o1-like reasoning and reflection via collective monte carlo tree search. arXiv preprint arXiv:2412.18319, 2024. 1, 3, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.405, + 0.906, + 0.473 + ], + "angle": 0, + "content": "[47] Huanjin Yao, Qixiang Yin, Jingyi Zhang, Min Yang, Yibo Wang, Wenhao Wu, Fei Su, Li Shen, Minghui Qiu, Dacheng Tao, et al. R1-sharev1: Incentivizing reasoning capability of multimodal large language models via share-grpo. arXiv preprint arXiv:2505.16673, 2025. 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.473, + 0.906, + 0.528 + ], + "angle": 0, + "content": "[48] Yuan Yao, Tianyu Yu, Ao Zhang, Chongyi Wang, Junbo Cui, Hongji Zhu, Tianchi Cai, Haoyu Li, Weilin Zhao, Zhihui He, et al. Minicpm-v: A gpt-4v level mllm on your phone. arXiv preprint arXiv:2408.01800, 2024. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.528, + 0.906, + 0.595 + ], + "angle": 0, + "content": "[49] Jiabo Ye, Anwen Hu, Haiyang Xu, Qinghao Ye, Ming Yan, Yuhao Dan, Chenlin Zhao, Guohai Xu, Chenliang Li, Junfeng Tian, et al. mplug-docowl: Modularized multimodal large language model for document understanding. arXiv preprint arXiv:2307.02499, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.595, + 0.906, + 0.649 + ], + "angle": 0, + "content": "[50] Dan Zhang, Sining Zhoubian, Ziniu Hu, Yisong Yue, Yuxiao Dong, and Jie Tang. Rest-mcts*: Llm self-training via process reward guided tree search. arXiv preprint arXiv:2406.03816, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.649, + 0.906, + 0.717 + ], + "angle": 0, + "content": "[51] Haotian Zhang, Mingfei Gao, Zhe Gan, Philipp Dufter, Nina Wenzel, Forrest Huang, Dhruti Shah, Xianzhi Du, Bowen Zhang, Yanghao Li, et al. Mm1. 5: Methods, analysis & insights from multimodal llm fine-tuning. arXiv preprint arXiv:2409.20566, 2024. 1, 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.717, + 0.906, + 0.771 + ], + "angle": 0, + "content": "[52] Jingyi Zhang, Jiaxing Huang, Sheng Jin, and Shijian Lu. Vision-language models for vision tasks: A survey. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.771, + 0.906, + 0.826 + ], + "angle": 0, + "content": "[53] Jingyi Zhang, Jiaxing Huang, Xiaoqin Zhang, Ling Shao, and Shijian Lu. Historical test-time prompt tuning for vision foundation models. Advances in Neural Information Processing Systems, 37:12872-12896, 2024. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.826, + 0.906, + 0.895 + ], + "angle": 0, + "content": "[54] Renrui Zhang, Dongzhi Jiang, Yichi Zhang, Haokun Lin, Ziyu Guo, Pengshuo Qiu, Aojun Zhou, Pan Lu, Kai-Wei Chang, Yu Qiao, et al. Mathverse: Does your multi-modal llm truly see the diagrams in visual math problems? In European Conference on Computer Vision, pages 169–186." + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.093, + 0.906, + 0.895 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.126, + 0.092, + 0.231, + 0.106 + ], + "angle": 0, + "content": "Springer, 2024. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.107, + 0.482, + 0.173 + ], + "angle": 0, + "content": "[55] Ruohong Zhang, Bowen Zhang, Yanghao Li, Haotian Zhang, Zhiqing Sun, Zhe Gan, Yinfei Yang, Ruoming Pang, and Yiming Yang. Improve vision language model chain-of-thought reasoning. arXiv preprint arXiv:2410.16198, 2024. 1, 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.175, + 0.482, + 0.228 + ], + "angle": 0, + "content": "[56] Xiaoman Zhang, Chaoyi Wu, Ziheng Zhao, Weixiong Lin, Ya Zhang, Yanfeng Wang, and Weidi Xie. Pmc-vqa: Visual instruction tuning for medical visual question answering. arXiv preprint arXiv:2305.10415, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.229, + 0.482, + 0.295 + ], + "angle": 0, + "content": "[57] Chengke Zou, Xingang Guo, Rui Yang, Junyu Zhang, Bin Hu, and Huan Zhang. Dynamath: A dynamic visual benchmark for evaluating mathematical reasoning robustness of vision language models. arXiv preprint arXiv:2411.00836, 2024. 5" + }, + { + "type": "list", + "bbox": [ + 0.094, + 0.107, + 0.482, + 0.295 + ], + "angle": 0, + "content": null + } + ] +] \ No newline at end of file diff --git a/data/2025/2503_12xxx/2503.12937/939affdd-0491-441c-956b-3cebb8540abd_origin.pdf b/data/2025/2503_12xxx/2503.12937/939affdd-0491-441c-956b-3cebb8540abd_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..736dcc939e2f1748df8cab266c743ae248c7b8d4 --- /dev/null +++ b/data/2025/2503_12xxx/2503.12937/939affdd-0491-441c-956b-3cebb8540abd_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cac011a66aa3b0fd0b99a20490ee23528ed74524bd9ad8c98f9adb95d1907bbb +size 832366 diff --git a/data/2025/2503_12xxx/2503.12937/full.md b/data/2025/2503_12xxx/2503.12937/full.md new file mode 100644 index 0000000000000000000000000000000000000000..8060b7bf93374ba8c14bf6d753d0eb65f7521abc --- /dev/null +++ b/data/2025/2503_12xxx/2503.12937/full.md @@ -0,0 +1,380 @@ +# R1-VL: Learning to Reason with Multimodal Large Language Models via Step-wise Group Relative Policy Optimization + +Jingyi Zhang Jiaxing Huang Huanjin Yao Shunyu Liu Xikun Zhang Shijian Lu Dacheng Tao Nanyang Technological University, Singapore + +# Abstract + +Recent studies generally enhance MLLMs' reasoning capabilities via supervised fine-tuning on high-quality chain-of-thought reasoning data, which often leads models to merely imitate successful reasoning paths without understanding what the wrong reasoning paths are. In this work, we aim to enhance the MLLMs' reasoning ability beyond passively imitating positive reasoning paths. To this end, we design Step-wise Group Relative Policy Optimization (StepGRPO), a new online reinforcement learning framework that enables MLLMs to self-improve reasoning ability via simple, effective and dense step-wise rewarding. 
Specifically, StepGRPO introduces two novel rule-based reasoning rewards: Step-wise Reasoning Accuracy Reward (StepRAR) and Step-wise Reasoning Validity Reward (StepRVR). StepRAR rewards reasoning paths that contain the necessary intermediate reasoning steps via a soft key-step matching technique, while StepRVR rewards reasoning paths that follow a well-structured and logically consistent reasoning process through a reasoning completeness and logic evaluation strategy. With the proposed StepGRPO, we introduce R1-VL, a series of MLLMs with outstanding capabilities in step-by-step reasoning. Extensive experiments over 8 benchmarks demonstrate the superiority of our methods. Code is available at link.

# 1. Introduction

Multimodal large language models (MLLMs) have achieved significant progress in vision-language understanding [1, 8, 15, 18, 21, 38, 43, 51]. Recent efforts generally enhance MLLMs' reasoning capabilities by employing supervised fine-tuning (SFT) on high-quality chain-of-thought (CoT) reasoning data generated by powerful models (e.g., GPT-4) [37, 44, 46, 55]. For example, Mulberry [46] introduces CoMCTS, which utilizes multiple models to collectively search and identify effective reasoning paths, followed by SFT on the collected reasoning data.

![](images/8f872592d2440c83707b4c948838641a3c2d1471896f5cd20dd8fca83cbb0a62.jpg)
Figure 1. For MLLMs, online reinforcement learning with outcome-level rewards, like in Deepseek-R1's GRPO [34], often suffers from sparse reward issues, where only a few reasoning paths can receive positive/high rewards during training, ultimately leading to poor exploration efficiency and an unstable learning process. To tackle this, we propose a novel online reinforcement learning framework that incorporates step-wise reasoning rewards in addition to outcome-level rewards, encouraging MLLMs to iteratively refine their reasoning with dense rewards and resulting in a more stable training process and improved reasoning capability. The experiments are conducted on Qwen2-VL-7B over MathVista.

However, SFT approaches focus solely on positive reasoning paths (i.e., those leading to correct answers), while the negative reasoning paths are largely neglected. This limitation may cause the model to merely imitate successful reasoning paths without understanding what the flawed and wrong reasoning paths are.

In this work, we aim to enhance the MLLMs' reasoning ability beyond passively imitating positive reasoning paths. Recent advancements in NLP, such as Deepseek-R1 [13] and Kimi-K1.5 [36], have shown great potential in incentivizing the reasoning capability of LLMs via active self-exploration. The core design of these advances (e.g., GRPO in Deepseek-R1) lies in online reinforcement learning without the need for reward models, which encourages an LLM to generate a group of reasoning paths and iteratively refine its reasoning process by rewarding the generated reasoning paths based on a rule-based reward function. Typically, an outcome-level reward strategy is used: reasoning paths leading to correct answers receive higher rewards, while those leading to incorrect answers receive lower ones.

An intuitive idea is to directly apply these simple and effective LLM online reinforcement learning methods to MLLMs. However, relying solely on outcome-level rewards, like in Deepseek-R1's GRPO, often suffers from sparse reward issues in MLLM reasoning learning, resulting in suboptimal performance.
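To see why outcome-only rewards go sparse, consider a toy numerical illustration (the numbers are invented for illustration, not taken from the paper): when none of the M sampled rollouts reaches the correct answer, every outcome-level reward is identical and the group-normalized advantage carries no learning signal, whereas dense step-wise rewards still rank the partially correct paths.

```python
# Toy illustration of the sparse-reward problem (invented numbers, M = 4 rollouts).
outcome = [0.0, 0.0, 0.0, 0.0]    # outcome-only: no rollout reaches the right answer
stepwise = [0.3, 0.0, 1.1, 0.2]   # step-wise: partial credit for key steps / valid structure

def group_advantages(rewards):
    """Group-relative normalization, as in GRPO-style advantage estimation."""
    m = sum(rewards) / len(rewards)
    s = (sum((r - m) ** 2 for r in rewards) / len(rewards)) ** 0.5
    return [0.0 if s == 0 else (r - m) / s for r in rewards]

print(group_advantages(outcome))   # [0.0, 0.0, 0.0, 0.0] -> no learning signal at all
print(group_advantages(stepwise))  # non-zero -> the best partial path is still reinforced
```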
Specifically, most MLLMs, especially smaller ones, exhibit very limited capability in long-chain reasoning accuracy and validity, whereas only a few MLLM-generated reasoning paths can receive positive/high rewards. This lack of positive reward signals reduces exploration efficiency and leads to an unstable learning process, as illustrated in Fig. 1. + +We propose to tackle this sparse reward issue by introducing dense step-wise reasoning rewards in addition to sparse outcome-level rewards. To this end, we design Stepwise Group Relative Policy Optimization (StepGRPO), a new online reinforcement learning framework that enables MLLMs to self-improve reasoning ability via simple, effective and dense step-wise rewarding while using no additional process reward models. Specifically, StepGRPO introduces two novel rule-based reasoning reward mechanisms: Step-wise Reasoning Accuracy Reward (StepRAR) and Step-wise Reasoning Validity Reward (StepRVR). + +StepRAR rewards the reasoning path using a soft key-step matching technique that evaluates whether the reasoning path contains key intermediate reasoning steps (i.e., the necessary steps to reach the correct final solution). StepRVR rewards the reasoning path based on a reasoning completeness and logic evaluation method, which assesses whether the reasoning process is well-structured and logically consistent. In this way, StepRAR and StepRVR help mitigate the sparse reward issue by providing informative rewards, even when the reasoning path does not produce the correct final answer – as long as it includes key intermediate reasoning steps or follows a structured and logical reasoning process. With StepRAR and StepRVR, StepGRPO takes the average step-wise reasoning rewards of a group of sampled reasoning paths as a baseline to estimate the advantage for policy optimization. Using the proposed StepGRPO, we develop R1-VL, a series of MLLMs with R1-like step-by-step reasoning capabilities. + +The proposed StepGRPO offers two key advantages. 1) Effectiveness. StepGRPO introduces two step-wise reasoning reward mechanisms with group relative optimization, which provide rich and fine-grained step-wise reasoning rewards along the whole reasoning trajectory beyond the final answer. This mitigates the sparse reward issue and encour + +ages more structured, logically consistent reasoning trajectories. 2) Efficiency. StepGRPO achieves step-wise reasoning rewarding in a rule-based manner, which provides step-wise reasoning rewards while eliminating the need of process reward models. This significantly reduces computational overhead while maintaining fine-grained step-wise supervisions. + +The main contributions of this work are threefold. First, we propose StepGRPO, a new online reinforcement learning framework that enables MLLMs to self-improve reasoning ability via a simple, effective and dense step-wise rewarding. Second, we design two novel rule-based reasoning reward mechanisms, i.e., step-wise reasoning accuracy reward and step-wise reasoning validity reward, which effectively mitigate the sparse reward issue for MLLMs without the need of process reward models. Third, with the proposed StepGRPO, we develop R1-VL, a series MLLMs that have superior reasoning capabilities. Forth, extensive experiments over multiple benchmarks show that R1-VL achieves superior performance compared with state-of-the-art MLLMs. + +# 2. Related Work + +# 2.1. 
Multimodal Large Language Model + +Multimodal Large Language Models (MLLMs) [1, 8, 15, 18, 21, 38, 43, 51, 52] have shown remarkable advancements across a wide range of vision-language understanding tasks, demonstrating their capabilities in comprehending and analyzing visual contents across various application domains. Early research on MLLMs primarily focuses on text generation based on text prompts and input multiple modalities such as images [20, 21, 53], videos [9, 35]. Recent advancements further enhance the capabilities of MLLMs from various aspects. For example, recent models [25, 42] incorporate multimodal inputs and outputs such as video, audio, and point cloud inputs beyond text and images. In addition, some efforts attempt to adapt MLLMs for domain-specific tasks, such as medical image understanding [17, 19, 56] and document analysis [22, 49]. In this work, we focus on enhancing the reasoning ability of MLLMs in tackling complex reasoning tasks and introduce R1-VL, a series of MLLMs that have superior reasoning capability. + +# 2.2. MLLM Reasoning + +Inspired by the advances in NLP that show great potential in learning to reason and tackling complex language tasks [29], recent studies attempt to enhance the reasoning capability of MLLM. Generally, current MLLM reasoning methods improve the reasoning capability of MLLM by generating high-quality chain-of-thoughts (CoT) data using powerful model (e.g., GPT-4) and performing supervised + +fine-tuning with the collected data [10, 37, 44, 46, 55]. For example, Mulberry [46] introduces Collective Monte Carlo Tree Search (MCTS) into MLLM and proposes CoMCTS which leverages complementary knowledge from multiple models to collaboratively search and identify effective reasoning paths. In addition, recent works [14, 27, 30, 47] attempt to explore online reinforcement learning to improve the MLLMs' reasoning ability. Different from these works, we design StepGRPO that enables MLLM to self-improve the reasoning ability with step-wise reward signals. + +# 2.3. Reinforcement Learning + +Reinforcement Learning (RL) [16] is a fundamental approach in machine learning, where an agent learns to interact with an environment by taking actions, receiving rewards, and updating its policy to maximize the long-term return. With the rise of large language models (LLMs) [4, 28, 31], Reinforcement Learning with Human Feedback (RLHF) [3] has emerged as a key technique for fine-tuning models using human preference data. RLHF leverages algorithms like Proximal Policy Optimization (PPO) [33] and Direct Preference Optimization (DPO) [32] to guide model behavior for improving the alignment, coherence and helpfulness in response generation. + +Recently, RL is increasingly adopted to enhance LLMs' reasoning capabilities [5, 7, 13, 24, 36, 50], especially for mathematical problem solving. The core is to adopt an appropriate reward function or model that evaluates and reinforces high-quality reasoning paths while penalizing low-quality ones, guiding the model's optimization towards more structured and coherent reasoning trajectories using the RL algorithm. For example, ReST-MCTS* [50] trains a process reward model (PRM) for determining the correctness of each reasoning step within reasoning paths. 
Recent methods have found that a simple outcome-level rule-based reward function (i.e., reasoning trajectories leading to the correct answer are rewarded with higher scores) can already provide an effective and reliable reward signal during the RL process [13, 24, 36]. For example, DeepSeek-R1 [13] demonstrates that group relative policy optimization (GRPO) [34] with an outcome-level reward effectively enhances the reasoning capability of LLMs. In this work, we aim to improve the reasoning capability of MLLMs through reinforcement learning and propose StepGRPO, which effectively tackles the sparse reward issue in MLLMs, leading to a stable training process and better reasoning capability.

# 3. Method

This section first presents the task formulation and then introduces the proposed Step-wise Group Relative Policy Optimization (StepGRPO). Details are elaborated in the ensuing subsections.

# 3.1. Task Formulation

In this paper, we consider a pre-trained MLLM and denote it as a policy model $\pi_{\theta}$. Given a multimodal question $Q$ consisting of an image and a textual task instruction, i.e., $Q = \{\text{text}, \text{image}\}$, the policy model $\pi_{\theta}$ generates a response $\mathbf{c}$ with a step-by-step reasoning trajectory. Generally, this process can be formulated as a sequence of next-token prediction actions, i.e., $\mathbf{c} = (a_1, a_2, \dots, a_t, \dots, a_T)$, where each action $a_t$ is sampled from the policy model $\pi_{\theta}$ and $T$ represents the maximum sequence length. After each action, the new state $s_{t+1}$ is determined by updating the current state $s_t$ with the newly generated action $a_t$, i.e., $s_{t+1} = (s_t, a_t)$, $1 \leq t \leq T$.

Given this formulation, the objective of our task is to optimize the policy model $\pi_{\theta}$ such that it selects better actions based on the previous states, thereby improving reasoning quality. In the context of reinforcement learning (RL), the policy model is generally optimized by maximizing the cumulative reward, where the reward for taking action $a_{t}$ at state $s_t$ is denoted as $r(s_t, a_t, s_{t+1})$. Following prior studies [46], we define an action in this paper as generating a reasoning step, which consists of one or more sentences containing multiple word tokens.

# 3.2. Step-wise Group Relative Policy Optimization

We propose Step-wise Group Relative Policy Optimization (StepGRPO), a novel online reinforcement fine-tuning framework that mitigates the sparse reward issue for MLLMs and encourages self-improvement in reasoning ability through simple, effective and dense step-wise reward mechanisms. As illustrated in Fig. 2, StepGRPO consists of two phases: (1) a policy warm-up phase and (2) a step-wise online policy optimization phase. The overall algorithm is shown in Algorithm 1.

# 3.2.1. Policy Warm-up

This phase equips the policy model with fundamental reasoning capabilities, ensuring it can generate proper step-wise reasoning paths before reinforcement learning. During the warm-up phase, the policy model is fine-tuned on a multimodal dataset $D_{s}$ with Chain-of-Thought (CoT) reasoning paths, where each sample consists of a multimodal question $Q$ and a step-by-step reasoning path $\tau$, i.e., $D_{s} = \{Q^{n}, \tau^{n}\}_{n=1}^{N}$:

$$
\mathcal{L}_{\text{warm-up}} = -\mathbb{E}_{\tau \sim D_{s}}\left[\sum_{t=1}^{T} \log \pi_{\theta}(a_{t} \mid s_{t})\right]. \tag{1}
$$
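To make Eq. 1 concrete, the sketch below shows how the warm-up objective reduces to a standard token-level cross-entropy over the CoT tokens. It is a minimal illustration rather than the authors' released code; the assumed label convention (prompt/padding positions masked with -100) follows common causal-LM fine-tuning practice.

```python
import torch
import torch.nn.functional as F

def warmup_loss(logits: torch.Tensor, labels: torch.Tensor) -> torch.Tensor:
    """Eq. 1: negative log-likelihood of the ground-truth CoT tokens.

    logits: (B, T, V) next-token predictions from the policy model.
    labels: (B, T) ground-truth token ids; prompt/padding positions
            are set to -100 so only reasoning-path tokens contribute.
    """
    # Shift so that position t predicts token t+1 (standard causal-LM setup).
    logits = logits[:, :-1, :].contiguous()
    labels = labels[:, 1:].contiguous()
    # Cross-entropy == -sum_t log pi_theta(a_t | s_t), averaged over tokens.
    return F.cross_entropy(
        logits.view(-1, logits.size(-1)),
        labels.view(-1),
        ignore_index=-100,
    )
```

In practice the warm-up is ordinary supervised fine-tuning on $D_s$ (Mulberry-260k in Sec. 4.1), so any standard SFT trainer implements this phase.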
# 3.2.2. Step-wise Online Policy Optimization

This phase enables MLLMs to self-improve their reasoning ability via online reinforcement learning, mitigating the sparse reward issue through step-wise reasoning rewards. As illustrated in Fig. 2, for each question $Q \in D_{s}$, the policy model $\pi_{\theta}$ first generates a group of $M$ reasoning trajectories via multiple rollouts, i.e., $\{\mathbf{c}^i\}_{i=1}^M$, where $\mathbf{c}^i = (a_1^i, a_2^i, \ldots, a_t^i, \ldots, a_T^i)$. After obtaining the group of $M$ reasoning trajectories, we employ our proposed step-wise reasoning rewards to evaluate and reward each generated trajectory. Specifically, we introduce two types of rule-based step-wise rewards, i.e., the step-wise reasoning accuracy reward (StepRAR) and the step-wise reasoning validity reward (StepRVR).

![](images/505609ce5b30e24850e3d0b33b9faa0f2d7fbcfed05b7deb464216876e31c18e.jpg)

Question: In the given diagram, triangle ABC has AD as its median and point E is the midpoint of AD. If the area of triangle ABC is 12, what is the area of triangle ABE?

![](images/7ad59bbf786298ad029c17f7fc43fbbfc0ac2a40931846c3527455d40fe2fdb1.jpg)

Answer: Step 1: Since AD is a median, it divides triangle ABC into two equal areas: ABD and ACD. Step 2: Segment AE is half of AD, splitting triangle ABD into two triangles of equal area: ABE and BED. Step 3: The area of triangle ABD is half of triangle ABC, which is $\frac{12}{2} = 6$. Step 4: Since E is the midpoint of AD, triangle ABE is half of triangle ABD. Therefore, the area of triangle ABE is $\frac{6}{2} = 3$. The final answer is 3.

(a) Step-wise Reasoning Accuracy Reward

Pre-extracted key steps with augmentations:
1. AD is a median; median is $AD$; ...
2. equal area; ...
3. AE is half of AD; $AE = \frac{1}{2}AD$; ...
4. $\frac{12}{2} = 6$; $12/2 = 6$; ...
5. E is the midpoint; ...
6. $\frac{6}{2} = 3$; $6/2 = 3$; ...

Soft key-step matching: Description: The image shows ...; #Rationale: The question asks for the area ...; #Step1: ... we find AD is a median of ...; #Step2: ... AE splits triangle ABD ...; #Step3: ... The area of triangle ABD is $12/2 = 6$, ..., and the area of triangle ABE is $\frac{6}{2} = 3$. #The final answer is: 3. Step-wise matching score: 3/6

(b) Step-wise Reasoning Validity Reward

#Description $\rightarrow$ #Rationale $\rightarrow$ #Step1 $\rightarrow$ ... $\rightarrow$ #StepN $\rightarrow$ #Answer. ✓

i. Reasoning completeness
#Description $\rightarrow$ #Rationale $\rightarrow$ #Answer. ✗ (missing reasoning steps)
#Description $\rightarrow$ #Step1 $\rightarrow$ ... $\rightarrow$ #StepN $\rightarrow$ #Answer. ✗ (missing rationale)

ii. Reasoning logic
#Description $\rightarrow$ #Rationale $\rightarrow$ #Answer $\rightarrow$ #Step1 $\rightarrow$ ... $\rightarrow$ #StepN. ✗
#Description $\rightarrow$ #Step3 $\rightarrow$ #Rationale $\rightarrow$ ... $\rightarrow$ #Step1 $\rightarrow$ #Answer. ✗

Figure 2. Overview of the proposed StepGRPO. StepGRPO consists of two phases: a policy warm-up phase and a step-wise online policy optimization phase. After the warm-up, the policy model $\pi_{\theta}$ generates a group of reasoning paths $\{\mathbf{c}^i\}_{i=1}^M$ and assigns step-wise rewards using two proposed mechanisms: Step-wise Reasoning Accuracy Reward (StepRAR) and Step-wise Reasoning Validity Reward (StepRVR). StepRAR rewards reasoning paths that contain key intermediate steps, identified using a soft key-step matching technique. StepRVR rewards reasoning paths based on completeness and logical consistency, ensuring they are well-structured. StepGRPO then estimates the advantage $\hat{A}$ for policy optimization by using the average step-wise reasoning reward of a group of sampled reasoning paths as a baseline. Examples for StepRAR and StepRVR are illustrated in (a) and (b), respectively.

Step-wise reasoning accuracy reward (StepRAR) reduces the effect of learning from sparse rewards by additionally rewarding reasoning paths that contain correct intermediate reasoning steps contributing to the final solution. Specifically, for each question $Q$, we pre-extract a set of key reasoning steps $\mathbf{v} = \{v_{1}, v_{2}, \ldots\}$ from the corresponding reasoning path $\tau$ in dataset $D_{s}$. We define key steps as the essential variables and equations that directly contribute to the final solution, and prompt GPT-4 to extract several key steps from the reasoning path for each question. To ensure efficient reward assignment, we refine the extracted steps by removing redundant content and retaining only the few core words necessary for reasoning. Furthermore, we augment each extracted key step into multiple equivalent formats to allow more flexible and accurate matching, preventing missed matches due to math-related formatting differences. For example, a mathematical expression such as "$\frac{6}{3} = 2$" is augmented to "6/3 = 2" and "6 divided by 3 equals 2".

With the extracted key reasoning steps $\mathbf{v} = \{v_{1}, v_{2}, \ldots\}$ and this soft matching mechanism, we calculate a match score for each generated reasoning path based on the ratio of matched key steps, i.e., $k^{i} = |\mathbf{v}_{\text{match}}| / |\mathbf{v}|$. Then, StepRAR for $1 \leq t \leq T$ is defined as:

$$
r_{\text{acc}}^{i}(s_{t}, a_{t}, s_{t+1}) = \begin{cases} 1 + \alpha k^{i}, & \operatorname{ans}(s_{t+1}) = y, \\ \alpha k^{i}, & \operatorname{ans}(s_{t+1}) \neq \text{null}, \ \operatorname{ans}(s_{t+1}) \neq y, \\ 0, & \operatorname{ans}(s_{t+1}) = \text{null}, \end{cases} \tag{2}
$$

where $y$ is the ground-truth answer extracted from the CoT reasoning path.

By leveraging pre-extracted key reasoning steps, StepRAR efficiently provides additional supervision with a simple soft matching mechanism, ensuring the model learns meaningful reasoning processes instead of guessing answers randomly.
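The following sketch illustrates how StepRAR's soft key-step matching and the reward of Eq. 2 could be implemented. It is a simplified reconstruction under stated assumptions: simple substring matching after normalization stands in for the paper's soft matching, and the `extract`-style helpers and key-step layout are hypothetical, not the authors' exact code.

```python
import re

def normalize(text: str) -> str:
    """Lowercase and strip spaces/LaTeX wrappers so '\\frac{6}{2}=3' and '6/2 = 3' can match."""
    text = text.lower()
    text = re.sub(r"\\frac\{(\w+)\}\{(\w+)\}", r"\1/\2", text)  # \frac{a}{b} -> a/b
    return re.sub(r"[\s$\\]", "", text)

def match_score(path: str, key_steps: list[list[str]]) -> float:
    """k^i: fraction of key steps for which any augmented variant occurs in the path."""
    norm_path = normalize(path)
    matched = sum(
        any(normalize(v) in norm_path for v in variants) for variants in key_steps
    )
    return matched / max(len(key_steps), 1)

def step_rar(path: str, key_steps, answer, gt_answer, alpha: float = 0.1) -> float:
    """Eq. 2: accuracy reward with a soft key-step bonus (alpha = 0.1 as in Sec. 4.2)."""
    k = match_score(path, key_steps)
    if answer is None:           # no parsable final answer -> 0
        return 0.0
    if answer == gt_answer:      # correct answer -> 1 + alpha * k
        return 1.0 + alpha * k
    return alpha * k             # wrong but non-null answer -> alpha * k
```

For the Fig. 2 example, `key_steps` would hold entries like `["\\frac{12}{2}=6", "12/2=6"]`, and a path matching 3 of 6 key steps with a wrong final answer would receive $\alpha \cdot 3/6$ rather than zero.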
Step-wise reasoning validity reward (StepRVR) aims to ensure that the generated paths adhere to a logically structured and coherent progression, beyond reasoning accuracy. Prior studies [44, 46] have demonstrated that structured reasoning, such as problem decomposition and progressive reasoning, facilitates more accurate and interpretable reasoning processes, as it encourages models to break down complex problems into multiple intermediate steps rather than generating answers directly.

Inspired by these findings, we incorporate step-wise reasoning validity to reinforce well-organized reasoning paths that follow an expected logical flow. Specifically, we define StepRVR using two key criteria: reasoning completeness $\delta^c$ and reasoning logic $\delta^l$. Reasoning completeness requires the response to include three essential components, i.e., a background analysis involving an image description and rationale analysis to establish context, a step-by-step reasoning process, and a final answer. In addition to reasoning completeness, reasoning logic requires the reasoning path to follow a logical progression, where the background analysis must come before the solution steps and the final answer should only appear after the reasoning steps are complete.

With these two criteria, we define StepRVR as

$$
r_{\text{val}}^{i}(s_{t}, a_{t}, s_{t+1}) = \begin{cases} 1, & \mathbb{I}(\delta^{c}(s_{t+1})) \cdot \mathbb{I}(\delta^{l}(s_{t+1})) = 1, \\ 0, & \text{otherwise}, \end{cases} \tag{3}
$$

where the reasoning trajectory is rewarded only if it satisfies both completeness and logical coherence. By enforcing this, StepRVR helps the model produce structured, interpretable and logically sound reasoning trajectories, enhancing both the quality and reliability of the generated responses.

Optimization with the step-wise rewards. After obtaining the two types of step-wise rewards, we compute the overall reward for each reasoning path as $r^i = r_{\text{acc}}^i + r_{\text{val}}^i$, and compute such rewards for all generated reasoning paths, i.e., $\{r^1, r^2, \dots, r^M\}$.

To estimate the advantage of each reasoning trajectory, we normalize its reward relative to the group as follows:

$$
\hat{A}^{i} = \frac{r^{i} - \operatorname{mean}\left(\{r^{1}, r^{2}, \dots, r^{M}\}\right)}{\operatorname{std}\left(\{r^{1}, r^{2}, \dots, r^{M}\}\right)}, \tag{4}
$$

where the mean group reward serves as the baseline, and $\hat{A}^i$ measures how much better or worse $r^i$ is compared to the other reasoning trajectories within the group. Following this, we optimize the policy model with the loss defined as:

$$
\mathcal{L}_{\text{StepRL}} = -\mathbb{E}_{Q \in D_{s}}\left[\frac{1}{M} \sum_{i=1}^{M} \frac{\pi_{\theta}(\mathbf{c}^{i} \mid Q)}{\left[\pi_{\theta}(\mathbf{c}^{i} \mid Q)\right]_{\text{nograd}}} \hat{A}^{i} - \beta D_{KL}\left(\pi_{\theta} \,\|\, \pi_{ref}\right)\right], \tag{5}
$$

where the KL divergence is adopted to regularize the policy model, preventing excessive deviation from the reference model. The reference model is typically initialized as the same model as the policy model but remains frozen during RL training. The KL divergence between the policy model and the reference model is estimated as in [34]:

$$
D_{KL}\left(\pi_{\theta} \,\|\, \pi_{ref}\right) = \frac{\pi_{ref}(\mathbf{c}^{i} \mid Q)}{\pi_{\theta}(\mathbf{c}^{i} \mid Q)} - \log \frac{\pi_{ref}(\mathbf{c}^{i} \mid Q)}{\pi_{\theta}(\mathbf{c}^{i} \mid Q)} - 1. \tag{6}
$$

Algorithm 1 Step-wise Group Relative Policy Optimization

Input: Policy model $\pi_{\theta}$ initialized by a pre-trained MLLM; a multimodal dataset $D_{s} = \{Q^{n}, \tau^{n}\}_{n=1}^{N}$
Output: Trained policy model $\pi_{\theta}$

Policy warm-up:
for iter $= 1$ to $N$ do
  Sample $\{Q, \tau\} \in D_s$
  Optimize the policy model $\pi_{\theta}$ by Eq. 1
end for

Step-wise online policy optimization:
for iter $= 1$ to $N$ do
  Sample $\{Q, \tau\} \in D_s$
  Generate a group of reasoning paths $\{\mathbf{c}^i\}_{i=1}^M \sim \pi_\theta$
  Obtain step-wise rewards $\{r^i\}_{i=1}^M$ by Eqs. 2-3
  Obtain relative advantages $\{\hat{A}^i\}_{i=1}^M$ by Eq. 4
  Optimize the policy model $\pi_{\theta}$ by Eqs. 5-6
end for
return policy model $\pi_{\theta}$
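To tie Eqs. 3-6 together, here is a compact sketch of one StepGRPO update for a single question: a rule-based StepRVR format check, group-relative advantages (Eq. 4), and the GRPO-style surrogate with the KL estimator of Eq. 6 computed from per-path log-probabilities. This is an illustrative reconstruction, not the authors' implementation; the section-tag strings are assumptions, and a real implementation would work on token-level log-probabilities, e.g., as in TRL [39].

```python
import torch

def step_rvr(path: str) -> float:
    """Eq. 3: 1.0 iff the path is complete (description, rationale, steps, answer)
    and logically ordered (description -> rationale -> steps -> answer)."""
    tags = ["#Description", "#Rationale", "#Step", "#Answer"]  # assumed section tags
    positions = [path.find(t) for t in tags]
    complete = all(p >= 0 for p in positions)        # delta^c: all components present
    ordered = positions == sorted(positions)         # delta^l: coarse order check
    return 1.0 if complete and ordered else 0.0

def stepgrpo_loss(logps: torch.Tensor, ref_logps: torch.Tensor,
                  rewards: torch.Tensor, beta: float = 0.04) -> torch.Tensor:
    """One group of M rollouts for a single question Q.

    logps:     (M,) sum of log pi_theta over each path's tokens (with grad).
    ref_logps: (M,) same under the frozen reference model (no grad).
    rewards:   (M,) r^i = r_acc^i + r_val^i from Eqs. 2-3.
    """
    # Eq. 4: group-relative advantage (epsilon guards a zero-variance group).
    adv = (rewards - rewards.mean()) / (rewards.std() + 1e-8)
    # Eq. 5 ratio pi_theta / [pi_theta]_nograd == exp(logp - logp.detach()).
    ratio = torch.exp(logps - logps.detach())
    # Eq. 6 KL estimator: r - log r - 1 with r = pi_ref / pi_theta.
    log_r = ref_logps - logps
    kl = torch.exp(log_r) - log_r - 1.0
    return -(ratio * adv - beta * kl).mean()
```

With $\beta = 0.04$ and $M = 4$ (Sec. 4.2), each inner iteration of Algorithm 1 reduces to one such scalar loss per sampled question.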
# 4. Experiment

This section presents the datasets and implementation details, the main experimental results, ablation studies, and discussion. Details are described in the ensuing subsections.

# 4.1. Datasets

For policy warm-up, we adopt Mulberry-260k [46] for supervised fine-tuning. For step-wise online policy optimization, we randomly sample 10K data from Mulberry-260k as our training data. For evaluation, we adopt widely used multimodal benchmarks to comprehensively evaluate our proposed StepGRPO, including MathVista [23], MMStar [6], Math-Vision [40], ChartQA [26], DynaMath [57], HallusionBench [12], MathVerse [54], MME [11] and MM-Reason [45]. These multimodal benchmarks cover a wide range of tasks, from mathematical reasoning and chart understanding to visual hallucination and general visual understanding.

# 4.2. Implementation Details

Our proposed StepGRPO is generally applicable to different MLLMs. In our experiments, we adopt two state-of-the-art open-source MLLMs, i.e., Qwen2-VL-2B and Qwen2-VL-7B [41]. For the policy warm-up phase, we set the training batch size to 128. Following prior work [46], we use a learning rate of $1\mathrm{e}^{-5}$ for Qwen2-VL-2B and $5\mathrm{e}^{-6}$ for Qwen2-VL-7B, respectively.

For the step-wise online policy optimization phase, we perform 4 rollouts per question ($M = 4$) and set the sampling temperature to 1.2 to encourage diverse reasoning paths. The maximum sequence length is set to $L = 1024$, ensuring that the model can generate complete reasoning paths. Both the policy model and the reference model are initialized from the model after the warm-up, with the reference model frozen during RL training. The policy model's learning rate is $1\mathrm{e}^{-6}$, and we set the batch size to 4. We set the coefficient of the match score $\alpha$ to 0.1 to balance its effect. Following [39], the KL divergence coefficient $\beta$ in Eq. 5 is set to 0.04 by default. All experiments are conducted on 4 H100-80GB GPUs.
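Summarizing the hyperparameters above, a minimal training configuration might look like the following; the field names are illustrative, not taken from a released config file.

```python
from dataclasses import dataclass

@dataclass
class StepGRPOConfig:
    # Policy warm-up (SFT on Mulberry-260k)
    warmup_batch_size: int = 128
    warmup_lr_2b: float = 1e-5   # Qwen2-VL-2B
    warmup_lr_7b: float = 5e-6   # Qwen2-VL-7B
    # Step-wise online policy optimization (10K sampled questions)
    num_rollouts: int = 4        # M, reasoning paths per question
    temperature: float = 1.2     # sampling temperature for diverse rollouts
    max_seq_len: int = 1024      # L, maximum generation length
    rl_lr: float = 1e-6
    rl_batch_size: int = 4
    alpha: float = 0.1           # match-score coefficient in Eq. 2
    beta: float = 0.04           # KL coefficient in Eq. 5
```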
| Method | MathVista | MMStar | Math-V | ChartQA | DynaMath | HallBench | MathVerse | MME (sum) | MMReason | AVG |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| *Closed-Source Models* | | | | | | | | | | |
| GPT-4o [15] | 63.8 | 63.9 | 30.3 | 85.7 | 63.7 | 55.0 | 39.4 | 2329 | 21.1 | 56.2 |
| Claude-3.5 Sonnet [1] | 67.7 | 62.2 | - | 90.8 | 64.8 | 55.0 | - | 1920 | - | - |
| *Open-Source Models* | | | | | | | | | | |
| Cambrian-1-8B [38] | 49.0 | - | - | 73.3 | - | - | - | - | - | - |
| MM-1.5-7B [51] | 47.6 | - | - | 78.6 | - | - | - | 1861 | - | - |
| Idefics3-LLaMA3-8B [18] | 58.4 | 55.9 | - | 74.8 | - | - | - | 1937 | - | - |
| InternVL2-8B [8] | 58.3 | 61.5 | - | 83.3 | 39.7 | - | - | 2210 | - | - |
| MiniCPM-V-2.6-8B [48] | 60.6 | 57.5 | - | - | - | 48.1 | - | 2348 | - | - |
| DeepSeek-VL2-MOE-4.5B [43] | 62.8 | 61.3 | - | 86.0 | - | - | - | 2253 | 11.5 | - |
| *Reasoning Models* | | | | | | | | | | |
| LLaVA-CoT-11B [44] | 54.8 | 57.6 | - | - | - | 47.8 | - | - | - | - |
| LLaVA-Reasoner-8B [55] | 50.6 | 54.0 | - | 83.0 | - | - | - | - | - | - |
| Insight-V-8B [10] | 49.8 | 57.4 | - | 77.4 | - | - | - | 2069 | - | - |
| Mulberry-7B [46] | 63.1 | 61.3 | - | 83.9 | 45.1 | 54.1 | - | 2396 | 11.8 | - |
| LlamaV-o1-11B [37] | 54.4 | 59.4 | - | - | - | 63.5 | - | - | - | - |
| Vision-R1-7B [14] | 73.5 | - | - | - | - | - | 52.4 | - | - | - |
| LMM-R1 [30] | 63.2 | 58.0 | 26.3 | - | - | - | 41.5 | - | - | - |
| R1-ShareVL-7B [47] | 75.4 | 67.0 | 29.5 | - | - | - | 52.8 | - | - | - |
| Qwen2-VL-2B [41] | 43.0 | 48.0 | 12.4 | 73.5 | 24.9 | 41.7 | 19.7 | 1872 | 7.7 | 37.5 |
| R1-VL-2B (Ours) | 52.1 | 49.8 | 17.1 | 75.2 | 29.4 | 44.0 | 26.2 | 2048 | 8.3 | 41.6 |
| Qwen2-VL-7B [41] | 58.2 | 60.7 | 16.3 | 83.0 | 42.1 | 50.6 | 32.5 | 2327 | 11.9 | 48.7 |
| R1-VL-7B (Ours) | 63.5 | 60.0 | 24.7 | 83.9 | 45.2 | 54.7 | 40.0 | 2376 | 12.5 | 52.1 |
| Qwen2.5-VL-7B [2] | 68.2 | 63.9 | 25.1 | 87.3 | 53.2 | 52.1 | 49.2 | 2347 | 17.3 | 55.5 |
| R1-VL-7B* (Ours) | 74.3 | 66.2 | 28.2 | 87.7 | 56.5 | 57.2 | 52.2 | 2395 | 17.9 | 58.4 |

Table 1. Main experimental results. To comprehensively examine the proposed StepGRPO, we conduct extensive experiments with two baseline models on eight benchmarks, and compare StepGRPO with various state-of-the-art MLLMs. * indicates that the model is trained using Qwen2.5-VL-7B as the base model with the data from [47].
| Warm-up | StepRAR | StepRVR | MathVista |
| --- | --- | --- | --- |
|  |  |  | 58.2 |
| ✓ |  |  | 61.2 |
| ✓ | ✓ |  | 62.4 |
| ✓ |  | ✓ | 61.9 |
| ✓ | ✓ | ✓ | 63.5 |

Table 2. Ablation study of StepGRPO over Qwen2-VL-7B.

# 4.3. Main Experimental Results

We conduct a comprehensive evaluation of R1-VL across eight widely used benchmarks, comparing it with various state-of-the-art MLLMs, as shown in Table 1.

We first compare R1-VL with its baseline models, Qwen2-VL-2B and Qwen2-VL-7B. The baseline models exhibit limited reasoning capability, so very few of their reasoning paths receive rewards, which negatively impacts reasoning learning. In contrast, R1-VL with our proposed StepGRPO consistently improves the baseline models by significant margins, achieving a $4.6\%$ improvement over Qwen2-VL-2B and $3.8\%$ over Qwen2-VL-7B. This improvement is largely attributed to the step-wise reasoning accuracy and validity rewards introduced by StepGRPO, which provide rich and informative supervision at each reasoning step, effectively mitigating the sparse reward issue for MLLMs.

| Method | M=2 | M=3 | M=4 | M=5 | M=6 |
| --- | --- | --- | --- | --- | --- |
| R1-VL-7B | 62.5 | 62.8 | 63.5 | 63.2 | 63.7 |

Table 3. Parameter analysis of the number of generations $M$ per question. The experiments are conducted on Qwen2-VL-7B over MathVista.

In addition, we compare R1-VL with existing state-of-the-art reasoning MLLMs. As shown in Table 1, R1-VL achieves better performance on most benchmarks, particularly on mathematical reasoning tasks. For example, R1-VL-7B surpasses Mulberry-7B and LlamaV-o1-11B by $0.6\%$ and $9.3\%$ respectively on the reasoning-intensive benchmark MathVista. Notably, R1-VL-2B even outperforms larger MLLMs. For instance, R1-VL-2B largely outperforms LLaVA-Reasoner-8B and LLaVA-CoT-11B by $13.1\%$ and $9.3\%$ on MathVista, respectively. This superior performance demonstrates that StepGRPO effectively enhances MLLMs' reasoning abilities by encouraging self-improvement via step-wise online reinforcement learning, rather than merely imitating positive reasoning paths.

Additionally, we benchmark R1-VL against general MLLMs, including closed-source models such as GPT-4o and Claude-3.5 Sonnet, as well as open-source models like Cambrian-1-8B and DeepSeek-VL2-MOE-4.5B. We observe that R1-VL outperforms most open-source MLLMs and achieves competitive results against closed-source models. For example, R1-VL-7B achieves 63.7 accuracy on MathVista, closely matching GPT-4o's accuracy of 63.8. These results further validate StepGRPO's effectiveness in enhancing the reasoning capabilities of MLLMs.

# 4.4. Ablation Study

We conduct ablation studies of StepGRPO on Qwen2-VL-7B over the MathVista benchmark to examine the effect of the step-wise reasoning rewards, including the step-wise reasoning accuracy reward (StepRAR) and the step-wise reasoning validity reward (StepRVR), as well as the role of the warm-up phase. As shown in Table 2, the warm-up stage improves the baseline model to $61.2\%$, allowing the model to learn basic reasoning knowledge before reinforcement learning. In addition, including either StepRAR or StepRVR in the online reinforcement learning process outperforms the model with warm-up only by large margins, demonstrating that both types of step-wise rewards contribute to enhancing step-by-step reasoning capabilities. The best performance (i.e., $63.7\%$) is achieved when both StepRAR and StepRVR are applied together. This shows that StepGRPO effectively improves complex
reasoning tasks by reinforcing both the correctness of intermediate steps and the overall logical structure of the reasoning process.

| Method | MathVista |
| --- | --- |
| Warm-up | 61.7 |
| Warm-up + Outcome-level reward | 62.3 |
| Warm-up + Step-wise reward (Ours) | 63.5 |

Table 4. Effectiveness of the step-wise reasoning rewards. The experiments are conducted on Qwen2-VL-7B over MathVista.

![](images/342463935f3c59dbe09ff74e62294e3541ac31f9b337d0a50fa17549e79c2968.jpg)
Figure 3. Comparison between StepGRPO and SFT. The experiments are conducted on Qwen2-VL-7B over MathVista.

# 4.5. Discussion

Parameter analysis. We conduct a parameter analysis of the number of generations $M$ in a group with Qwen2-VL-7B over the MathVista benchmark, analyzing its impact on reasoning performance. As described in Section 3, $M$ controls the number of generated reasoning trajectories per question during the RL phase. Table 3 shows that a larger $M$ generally leads to better performance. This is because, in group relative optimization, the baseline reward is estimated as the average reward of all generated reasoning paths. A larger $M$ results in a more stable and accurate baseline estimation, whereas a small $M$ may lead to high variance in the baseline estimation, making the optimization process less reliable. However, increasing $M$ also introduces higher computational costs. Therefore, we set $M = 4$ as the default to balance performance and computational efficiency.

Effectiveness of the step-wise reward. Our proposed step-wise reward mechanism plays a crucial role in mitigating the sparse reward issue by providing fine-grained supervision at each reasoning step. To further validate its effectiveness, we conduct an experiment comparing the outcome-level reward against our step-wise reward. Specifically, we evaluate three settings: (1) Warm-up only; (2) Warm-up + Outcome-level Reward, where the model is optimized with the outcome-level reward; and (3) Warm-up + Step-wise Reward, where the model is optimized with our proposed step-wise reasoning reward. As shown in Table 4, both the outcome-level reward and our step-wise reward improve the warm-up model's performance, while our step-wise reward achieves better performance. This further demonstrates that step-wise rewards are more effective in enhancing MLLMs' reasoning capabilities, as they provide more fine-grained supervision and largely mitigate the sparse reward issue.

Comparison to supervised fine-tuning (SFT). As discussed before, StepGRPO encourages the MLLM to self-improve its reasoning ability with step-wise reward signals rather than merely imitating successful reasoning paths. Here, we conduct experiments to further compare StepGRPO with SFT. Specifically, we start with the model after the warm-up and conduct the experiments with Qwen2-VL-7B over MathVista. As shown in Fig. 3, under the same number of training steps, StepGRPO consistently outperforms SFT, demonstrating the effectiveness of step-wise reinforcement learning. This is largely attributed to StepGRPO's ability to refine reasoning trajectories through self-exploration and reward-guided optimization, rather than solely relying on passive imitation of reasoning paths. By leveraging step-wise reasoning rewards, StepGRPO provides richer and more informative supervision, leading to better reasoning processes compared to SFT.

Qualitative comparison. We provide a qualitative comparison of Qwen2-VL-7B, Mulberry-7B and our R1-VL-7B. As shown in Fig. 4, Qwen2-VL-7B generates relatively short responses, lacking a thorough reasoning process. While Mulberry-7B generates detailed reasoning paths, its intermediate steps contain errors, leading to incorrect final answers. In contrast, R1-VL-7B produces a more accurate step-by-step reasoning process.

![](images/e5f37c322318e22f5c792d8f69d382aba0a87edb79ed67e1acbc2363b6ff942e.jpg)

Question: Subtract all red things. Subtract all tiny matte balls. How many objects are left? Correct answer: 5

Qwen2-VL-7B: First, identify the red objects in the image. There is 1 red object, which is a small red sphere. (Incorrect reasoning) Then, subtract the red object from the total number of objects. There are 7 objects in total and subtracting the 1 red object leaves 6 objects. Identify the tiny matte balls in the image. There are 2 tiny matte balls, which are the small green cylinder and the cyan sphere. Subtract the tiny matte balls from the remaining objects. After subtracting the red object, there are 6 objects left. Subtracting the 2 tiny matte balls leaves 4 objects. Answer: 4

Mulberry-7B: Image Description: The image shows several 3D objects on a flat surface. There are two red objects: a small red sphere and a larger green cylinder. Other objects include a yellow cube, a green cylinder, a blue cylinder, a purple cylinder, and a teal sphere. Rationales: The question asks to subtract all red things and all tiny matte balls. We need to identify these objects in the image and count the remaining ones. Let's think step by step. Step 1: Identify and count all red objects. There is one red sphere. (Incorrect reasoning) Step 2: Identify and count all tiny matte balls. There is one teal sphere, which is not red, so it is not subtracted. Step 3: Subtract the red objects and tiny matte balls from the total number of objects: $7 - 1 = 6$. The final answer is: 6

R1-VL-7B (Ours): Image Description: The image shows a collection of 3D geometric objects on a flat surface. There are various colored shapes, including a red sphere, a yellow cube, a green cylinder, a blue cylinder, a purple cylinder, and a cyan sphere. Rationales: The question asks to subtract all red things and all tiny matte balls, then determine how many objects are left. This involves identifying and counting specific objects based on their color and size. Let's think step by step. Step 1: Identify and count all objects in the image. There are seven objects: a yellow cube, a green cylinder, a blue cylinder, a purple cylinder, a red sphere, and a cyan sphere. Step 2: Subtract all red things. There is one red sphere. Step 3: Subtract all tiny matte balls from the remaining objects. There is one cyan sphere, which is matte and small. Step 4: Count the remaining objects after removing the red sphere and the cyan sphere. The final answer is: 5

Figure 4. Qualitative comparison of Qwen2-VL-7B, Mulberry-7B and R1-VL-7B.

We provide more discussions, experimental results and qualitative analysis in the appendix.

# 5. Conclusion

This paper presents StepGRPO, a new online reinforcement learning framework that enables MLLMs to self-improve their reasoning ability via a simple, effective and dense step-wise reward mechanism.
Specifically, StepGRPO introduces two rule-based reasoning reward mechanisms, i.e., a Step-wise Reasoning Accuracy Reward that rewards intermediate reasoning steps based on a soft key-step matching technique, and a Step-wise Reasoning Validity Reward that rewards the reasoning path's structure and logical consistency through a reasoning completeness and logic evaluation method. In this way, StepGRPO effectively mitigates the sparse reward issue for MLLMs without the need for process reward models and encourages a more structured and logically consistent reasoning process. With the proposed StepGRPO, we develop R1-VL, a series of MLLMs with superior reasoning capability. Extensive experiments over eight benchmarks demonstrate the superiority of the proposed StepGRPO compared with state-of-the-art MLLMs.

Acknowledgement. This research is supported by the RIE2025 Industry Alignment Fund - Industry Collaboration Projects (IAF-ICP) (Award I2301E0026), administered by A\*STAR, as well as supported by Alibaba Group and NTU Singapore through the Alibaba-NTU Global e-Sustainability CorpLab (ANGEL).

# References

[1] Anthropic. Claude 3.5 sonnet, 2024. 1, 2, 6
[2] Shuai Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Sibo Song, Kai Dang, Peng Wang, Shijie Wang, Jun Tang, et al. Qwen2.5-vl technical report. arXiv preprint arXiv:2502.13923, 2025. 6
[3] Yuntao Bai, Andy Jones, Kamal Ndousse, Amanda Askell, Anna Chen, Nova DasSarma, Dawn Drain, Stanislav Fort, Deep Ganguli, Tom Henighan, et al. Training a helpful and harmless assistant with reinforcement learning from human feedback. arXiv preprint arXiv:2204.05862, 2022. 3
[4] Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. Language models are few-shot learners. Advances in neural information processing systems, 33:1877-1901, 2020. 3
[5] Guoxin Chen, Minpeng Liao, Chengxi Li, and Kai Fan. Step-level value preference optimization for mathematical reasoning. arXiv preprint arXiv:2406.10858, 2024. 3
[6] Lin Chen, Jinsong Li, Xiaoyi Dong, Pan Zhang, Yuhang Zang, Zehui Chen, Haodong Duan, Jiaqi Wang, Yu Qiao, Dahua Lin, et al. Are we on the right way for evaluating large vision-language models? arXiv preprint arXiv:2403.20330, 2024. 5
[7] Zixiang Chen, Yihe Deng, Huizhuo Yuan, Kaixuan Ji, and Quanquan Gu. Self-play fine-tuning converts weak language models to strong language models. arXiv preprint arXiv:2401.01335, 2024. 3
[8] Zhe Chen, Weiyun Wang, Hao Tian, Shenglong Ye, Zhangwei Gao, Erfei Cui, Wenwen Tong, Kongzhi Hu, Jiapeng Luo, Zheng Ma, et al. How far are we to gpt-4v? closing the gap to commercial multimodal models with open-source suites. arXiv preprint arXiv:2404.16821, 2024. 1, 2, 6
[9] Zesen Cheng, Sicong Leng, Hang Zhang, Yifei Xin, Xin Li, Guanzheng Chen, Yongxin Zhu, Wenqi Zhang, Ziyang Luo, Deli Zhao, et al. Videollama 2: Advancing spatial-temporal modeling and audio understanding in video-llms. arXiv preprint arXiv:2406.07476, 2024. 2
[10] Yuhao Dong, Zuyan Liu, Hai-Long Sun, Jingkang Yang, Winston Hu, Yongming Rao, and Ziwei Liu. Insight-v: Exploring long-chain visual reasoning with multimodal large language models. arXiv preprint arXiv:2411.14432, 2024. 3, 6
[11] Chaoyou Fu, Peixian Chen, Yunhang Shen, Yulei Qin, Mengdan Zhang, Xu Lin, Jinrui Yang, Xiawu Zheng, Ke Li, Xing Sun, et al. Mme: A comprehensive evaluation benchmark for multimodal large language models.
arXiv preprint arXiv:2306.13394, 2023. 5
[12] Tianrui Guan, Fuxiao Liu, Xiyang Wu, Ruiqi Xian, Zongxia Li, Xiaoyu Liu, Xijun Wang, Lichang Chen, Furong Huang, Yaser Yacoob, et al. Hallusionbench: An advanced diagnostic suite for entangled language hallucination and visual illusion in large vision-language models. arXiv preprint arXiv:2310.14566, 2023. 5
[13] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025. 1, 3
[14] Wenxuan Huang, Bohan Jia, Zijie Zhai, Shaosheng Cao, Zheyu Ye, Fei Zhao, Zhe Xu, Yao Hu, and Shaohui Lin. Vision-r1: Incentivizing reasoning capability in multimodal large language models. arXiv preprint arXiv:2503.06749, 2025. 3, 6
[15] Aaron Hurst, Adam Lerer, Adam P Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihinda, Alan Hayes, Alec Radford, et al. Gpt-4o system card. arXiv preprint arXiv:2410.21276, 2024. 1, 2, 6
[16] Leslie Pack Kaelbling, Michael L Littman, and Andrew W Moore. Reinforcement learning: A survey. Journal of artificial intelligence research, 4:237-285, 1996. 3
[17] Xiang Lan, Feng Wu, Kai He, Qinghao Zhao, Shenda Hong, and Mengling Feng. Gem: Empowering mllm for grounded ecg understanding with time series and images. arXiv preprint arXiv:2503.06073, 2025. 2
[18] Hugo Laurençon, Andrés Marafioti, Victor Sanh, and Léo Tronchon. Building and better understanding vision-language models: insights and future directions. In Workshop on Responsibly Building the Next Generation of Multimodal Foundational Models, 2024. 1, 2, 6
[19] Chunyuan Li, Cliff Wong, Sheng Zhang, Naoto Usuyama, Haotian Liu, Jianwei Yang, Tristan Naumann, Hoifung Poon, and Jianfeng Gao. Llava-med: Training a large language-and-vision assistant for biomedicine in one day. arXiv preprint arXiv:2306.00890, 2023. 2
[20] Haotian Liu, Chunyuan Li, Yuheng Li, Bo Li, Yuanhan Zhang, Sheng Shen, and Yong Jae Lee. Llava-next: Improved reasoning, OCR, and world knowledge, January 2024. 2
[21] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. Advances in neural information processing systems, 36, 2024. 1, 2
[22] Yuliang Liu, Biao Yang, Qiang Liu, Zhang Li, Zhiyin Ma, Shuo Zhang, and Xiang Bai. Textmonkey: An OCR-free large multimodal model for understanding document. arXiv preprint arXiv:2403.04473, 2024. 2
[23] Pan Lu, Hritik Bansal, Tony Xia, Jiacheng Liu, Chunyuan Li, Hannaneh Hajishirzi, Hao Cheng, Kai-Wei Chang, Michel Galley, and Jianfeng Gao. Mathvista: Evaluating mathematical reasoning of foundation models in visual contexts. arXiv preprint arXiv:2310.02255, 2023. 5
[24] Trung Quoc Luong, Xinbo Zhang, Zhanming Jie, Peng Sun, Xiaoran Jin, and Hang Li. Reft: Reasoning with reinforced fine-tuning. arXiv preprint arXiv:2401.08967, 2024. 3
[25] Chenyang Lyu, Minghao Wu, Longyue Wang, Xinting Huang, Bingshuai Liu, Zefeng Du, Shuming Shi, and Zhaopeng Tu. Macaw-llm: Multi-modal language modeling with image, audio, video, and text integration. arXiv preprint arXiv:2306.09093, 2023. 2
[26] Ahmed Masry, Do Xuan Long, Jia Qing Tan, Shafiq Joty, and Enamul Hoque. Chartqa: A benchmark for question answering about charts with visual and logical reasoning. arXiv preprint arXiv:2203.10244, 2022. 5
[27] Fanqing Meng, Lingxiao Du, Zongkai Liu, Zhixiang Zhou, Quanfeng Lu, Daocheng Fu, Tiancheng Han, Botian Shi, Wenhai Wang, Junjun He, et al. Mm-eureka: Exploring the frontiers of multimodal reasoning with rule-based reinforcement learning. arXiv preprint arXiv:2503.07365, 2025. 3
[28] OpenAI. Gpt-4 technical report, 2023. 3
[29] OpenAI. Introducing openai o1, 2024. 2
[30] Yingzhe Peng, Gongrui Zhang, Miaosen Zhang, Zhiyuan You, Jie Liu, Qipeng Zhu, Kai Yang, Xingzhong Xu, Xin Geng, and Xu Yang. Lmm-r1: Empowering 3b lmms with strong reasoning abilities through two-stage rule-based rl. arXiv preprint arXiv:2503.07536, 2025. 3, 6
[31] Alec Radford, Karthik Narasimhan, Tim Salimans, Ilya Sutskever, et al. Improving language understanding by generative pre-training. 2018. 3
[32] Rafael Rafailov, Archit Sharma, Eric Mitchell, Christopher D Manning, Stefano Ermon, and Chelsea Finn. Direct preference optimization: Your language model is secretly a reward model. Advances in Neural Information Processing Systems, 36:53728-53741, 2023. 3
[33] John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov. Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347, 2017. 3
[34] Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300, 2024. 1, 3, 5
[35] Guangzhi Sun, Wenyi Yu, Changli Tang, Xianzhao Chen, Tian Tan, Wei Li, Lu Lu, Zejun Ma, Yuxuan Wang, and Chao Zhang. video-salmonn: Speech-enhanced audio-visual large language models. arXiv preprint arXiv:2406.15704, 2024. 2
[36] Kimi Team, Angang Du, Bofei Gao, Bowei Xing, Changjiu Jiang, Cheng Chen, Cheng Li, Chenjun Xiao, Chenzhuang Du, Chonghua Liao, et al. Kimi k1.5: Scaling reinforcement learning with llms. arXiv preprint arXiv:2501.12599, 2025. 1, 3
[37] Omkar Thawakar, Dinura Dissanayake, Ketan More, Ritesh Thawkar, Ahmed Heakl, Noor Ahsan, Yuhao Li, Mohammed Zumri, Jean Lahoud, Rao Muhammad Anwer, et al. Llamav-o1: Rethinking step-by-step visual reasoning in llms. arXiv preprint arXiv:2501.06186, 2025. 1, 3, 6
[38] Shengbang Tong, Ellis Brown, Penghao Wu, Sanghyun Woo, Manoj Middepogu, Sai Charitha Akula, Jihan Yang, Shusheng Yang, Adithya Iyer, Xichen Pan, et al. Cambrian-1: A fully open, vision-centric exploration of multimodal llms. arXiv preprint arXiv:2406.16860, 2024. 1, 2, 6
[39] Leandro von Werra, Younes Belkada, Lewis Tunstall, Edward Beeching, Tristan Thrush, Nathan Lambert, Shengyi Huang, Kashif Rasul, and Quentin Gallouédec. Trl: Transformer reinforcement learning. https://github.com/huggingface/trl, 2020. 6
[40] Ke Wang, Junting Pan, Weikang Shi, Zimu Lu, Houxing Ren, Aojun Zhou, Mingjie Zhan, and Hongsheng Li. Measuring multimodal mathematical reasoning with math-vision dataset. Advances in Neural Information Processing Systems, 37:95095-95169, 2025. 5
[41] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, et al. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191, 2024. 6
[42] Shengqiong Wu, Hao Fei, Leigang Qu, Wei Ji, and Tat-Seng Chua. Next-gpt: Any-to-any multimodal llm. arXiv preprint arXiv:2309.05519, 2023. 2
[43] Zhiyu Wu, Xiaokang Chen, Zizheng Pan, Xingchao Liu, Wen Liu, Damai Dai, Huazuo Gao, Yiyang Ma, Chengyue Wu, Bingxuan Wang, et al. Deepseek-vl2: Mixture-of-experts vision-language models for advanced multimodal understanding. arXiv preprint arXiv:2412.10302, 2024. 1, 2, 6
[44] Guowei Xu, Peng Jin, Li Hao, Yibing Song, Lichao Sun, and Li Yuan. Llava-o1: Let vision language models reason step-by-step. arXiv preprint arXiv:2411.10440, 2024. 1, 3, 5, 6
[45] Huanjin Yao, Jiaxing Huang, Yawen Qiu, Michael K Chen, Wenzheng Liu, Wei Zhang, Wenjie Zeng, Xikun Zhang, Jingyi Zhang, Yuxin Song, et al. MMreason: An open-ended multi-modal multi-step reasoning benchmark for mllms toward agi. arXiv preprint arXiv:2506.23563, 2025. 5
[46] Huanjin Yao, Jiaxing Huang, Wenhao Wu, Jingyi Zhang, Yibo Wang, Shunyu Liu, Yingjie Wang, Yuxin Song, Haocheng Feng, Li Shen, et al. Mulberry: Empowering mllm with o1-like reasoning and reflection via collective monte carlo tree search. arXiv preprint arXiv:2412.18319, 2024. 1, 3, 5, 6
[47] Huanjin Yao, Qixiang Yin, Jingyi Zhang, Min Yang, Yibo Wang, Wenhao Wu, Fei Su, Li Shen, Minghui Qiu, Dacheng Tao, et al. R1-sharevl: Incentivizing reasoning capability of multimodal large language models via share-grpo. arXiv preprint arXiv:2505.16673, 2025. 3, 6
[48] Yuan Yao, Tianyu Yu, Ao Zhang, Chongyi Wang, Junbo Cui, Hongji Zhu, Tianchi Cai, Haoyu Li, Weilin Zhao, Zhihui He, et al. Minicpm-v: A gpt-4v level mllm on your phone. arXiv preprint arXiv:2408.01800, 2024. 6
[49] Jiabo Ye, Anwen Hu, Haiyang Xu, Qinghao Ye, Ming Yan, Yuhao Dan, Chenlin Zhao, Guohai Xu, Chenliang Li, Junfeng Tian, et al. mplug-docowl: Modularized multimodal large language model for document understanding. arXiv preprint arXiv:2307.02499, 2023. 2
[50] Dan Zhang, Sining Zhoubian, Ziniu Hu, Yisong Yue, Yuxiao Dong, and Jie Tang. Rest-mcts*: Llm self-training via process reward guided tree search. arXiv preprint arXiv:2406.03816, 2024. 3
[51] Haotian Zhang, Mingfei Gao, Zhe Gan, Philipp Dufter, Nina Wenzel, Forrest Huang, Dhruti Shah, Xianzhi Du, Bowen Zhang, Yanghao Li, et al. Mm1.5: Methods, analysis & insights from multimodal llm fine-tuning. arXiv preprint arXiv:2409.20566, 2024. 1, 2, 6
[52] Jingyi Zhang, Jiaxing Huang, Sheng Jin, and Shijian Lu. Vision-language models for vision tasks: A survey. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2024. 2
[53] Jingyi Zhang, Jiaxing Huang, Xiaoqin Zhang, Ling Shao, and Shijian Lu. Historical test-time prompt tuning for vision foundation models. Advances in Neural Information Processing Systems, 37:12872-12896, 2024. 2
[54] Renrui Zhang, Dongzhi Jiang, Yichi Zhang, Haokun Lin, Ziyu Guo, Pengshuo Qiu, Aojun Zhou, Pan Lu, Kai-Wei Chang, Yu Qiao, et al. Mathverse: Does your multi-modal llm truly see the diagrams in visual math problems? In European Conference on Computer Vision, pages 169–186. Springer, 2024. 5
[55] Ruohong Zhang, Bowen Zhang, Yanghao Li, Haotian Zhang, Zhiqing Sun, Zhe Gan, Yinfei Yang, Ruoming Pang, and Yiming Yang. Improve vision language model chain-of-thought reasoning. arXiv preprint arXiv:2410.16198, 2024. 1, 3, 6
[56] Xiaoman Zhang, Chaoyi Wu, Ziheng Zhao, Weixiong Lin, Ya Zhang, Yanfeng Wang, and Weidi Xie. Pmc-vqa: Visual instruction tuning for medical visual question answering. arXiv preprint arXiv:2305.10415, 2023. 2
[57] Chengke Zou, Xingang Guo, Rui Yang, Junyu Zhang, Bin Hu, and Huan Zhang. Dynamath: A dynamic visual benchmark for evaluating mathematical reasoning robustness of vision language models. arXiv preprint arXiv:2411.00836, 2024.
5 \ No newline at end of file diff --git a/data/2025/2503_12xxx/2503.12937/images/08a395e3dfac9af5c8b9b4bcea8c772a6dd987bb1bf0054d2e9907a99f461acd.jpg b/data/2025/2503_12xxx/2503.12937/images/08a395e3dfac9af5c8b9b4bcea8c772a6dd987bb1bf0054d2e9907a99f461acd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f70c9b4d427ee468ad8854ba89fef2042790a0e4 --- /dev/null +++ b/data/2025/2503_12xxx/2503.12937/images/08a395e3dfac9af5c8b9b4bcea8c772a6dd987bb1bf0054d2e9907a99f461acd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f6cf1f73826e1c61a196547c412e23a297e44fce3e37a08cf7a5a57995dfb18d +size 170609 diff --git a/data/2025/2503_12xxx/2503.12937/images/1558e8c3b9e8da8ef6634dfd8880bbdd56fd4f2cc1fe7590b5502e41bf971d92.jpg b/data/2025/2503_12xxx/2503.12937/images/1558e8c3b9e8da8ef6634dfd8880bbdd56fd4f2cc1fe7590b5502e41bf971d92.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cfee19de6c59457e2d9989c0dc75b8bdf1fe178e --- /dev/null +++ b/data/2025/2503_12xxx/2503.12937/images/1558e8c3b9e8da8ef6634dfd8880bbdd56fd4f2cc1fe7590b5502e41bf971d92.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0d6a81816280f4c15910d1b0ba23d34acf8f130a9b255cba865cad22b2955b18 +size 11801 diff --git a/data/2025/2503_12xxx/2503.12937/images/30454a921be72743b7012a32c0b65488666b790c3263f8df6ab07a0349fea014.jpg b/data/2025/2503_12xxx/2503.12937/images/30454a921be72743b7012a32c0b65488666b790c3263f8df6ab07a0349fea014.jpg new file mode 100644 index 0000000000000000000000000000000000000000..adfe4c90e20bfe4ffad96b9e1963351266ec2b0e --- /dev/null +++ b/data/2025/2503_12xxx/2503.12937/images/30454a921be72743b7012a32c0b65488666b790c3263f8df6ab07a0349fea014.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e0736df9d7cc0840355b36a409407bce639811a499f3fa6af3310b7e6b25d1a8 +size 17142 diff --git a/data/2025/2503_12xxx/2503.12937/images/31ea3d8e55752229dfade1dd6643121292be3f7cec00e13905b27dd70e12926c.jpg b/data/2025/2503_12xxx/2503.12937/images/31ea3d8e55752229dfade1dd6643121292be3f7cec00e13905b27dd70e12926c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..38d76afce190d3a0f5a1480207959f74d2bc2ae6 --- /dev/null +++ b/data/2025/2503_12xxx/2503.12937/images/31ea3d8e55752229dfade1dd6643121292be3f7cec00e13905b27dd70e12926c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7486c7df363c737bea8ce1b8a36715fbaa8bd867cf99798e33892cb5b2d9bc73 +size 6308 diff --git a/data/2025/2503_12xxx/2503.12937/images/342463935f3c59dbe09ff74e62294e3541ac31f9b337d0a50fa17549e79c2968.jpg b/data/2025/2503_12xxx/2503.12937/images/342463935f3c59dbe09ff74e62294e3541ac31f9b337d0a50fa17549e79c2968.jpg new file mode 100644 index 0000000000000000000000000000000000000000..93da2715376b9838681899f55996292169965049 --- /dev/null +++ b/data/2025/2503_12xxx/2503.12937/images/342463935f3c59dbe09ff74e62294e3541ac31f9b337d0a50fa17549e79c2968.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c99b7e567d83368d434777cdd5f593bfe1478264e5571496913ce7aecc13d32a +size 19817 diff --git a/data/2025/2503_12xxx/2503.12937/images/505609ce5b30e24850e3d0b33b9faa0f2d7fbcfed05b7deb464216876e31c18e.jpg b/data/2025/2503_12xxx/2503.12937/images/505609ce5b30e24850e3d0b33b9faa0f2d7fbcfed05b7deb464216876e31c18e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9616fddbd26005656fd2421122b2e8667ec3a291 --- /dev/null +++ 
b/data/2025/2503_12xxx/2503.12937/images/505609ce5b30e24850e3d0b33b9faa0f2d7fbcfed05b7deb464216876e31c18e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:54b3b67a9ca1e4a69c0ac4f69edca094321a5ada7072a6a3b045c7eadc497c87 +size 4872 diff --git a/data/2025/2503_12xxx/2503.12937/images/5606d2223621250bdaa6c74f2d34a58e7f44e544758016ac5a16d0b60f1acd4f.jpg b/data/2025/2503_12xxx/2503.12937/images/5606d2223621250bdaa6c74f2d34a58e7f44e544758016ac5a16d0b60f1acd4f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..94665855bb2e1e73896f541205ba74fcdecda9fb --- /dev/null +++ b/data/2025/2503_12xxx/2503.12937/images/5606d2223621250bdaa6c74f2d34a58e7f44e544758016ac5a16d0b60f1acd4f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:44101c0585fdffdb779a8b7ce35e7c99a973f887249df82f0e62e5e658a1fb40 +size 19875 diff --git a/data/2025/2503_12xxx/2503.12937/images/6b3c67a4c50ce11940655a5fb86d1d6562af7aedeea159567fe508f24e38ba79.jpg b/data/2025/2503_12xxx/2503.12937/images/6b3c67a4c50ce11940655a5fb86d1d6562af7aedeea159567fe508f24e38ba79.jpg new file mode 100644 index 0000000000000000000000000000000000000000..064da3e2592e814427d8f8a26debbb760a489ce7 --- /dev/null +++ b/data/2025/2503_12xxx/2503.12937/images/6b3c67a4c50ce11940655a5fb86d1d6562af7aedeea159567fe508f24e38ba79.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:69eccbbbd8757dda81ca425a7f74c00e3d5809911d05bcb82fc69c321328d1d2 +size 9080 diff --git a/data/2025/2503_12xxx/2503.12937/images/7ad59bbf786298ad029c17f7fc43fbbfc0ac2a40931846c3527455d40fe2fdb1.jpg b/data/2025/2503_12xxx/2503.12937/images/7ad59bbf786298ad029c17f7fc43fbbfc0ac2a40931846c3527455d40fe2fdb1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..314cfb98a0b1f3d0444ada3048b75a68afaeb73f --- /dev/null +++ b/data/2025/2503_12xxx/2503.12937/images/7ad59bbf786298ad029c17f7fc43fbbfc0ac2a40931846c3527455d40fe2fdb1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3d93e04642c2c57e4f0e0fb60cf05d418c5bf1b5e820735dd9f5d8969558d3a8 +size 61548 diff --git a/data/2025/2503_12xxx/2503.12937/images/89c21106dadd9e892de897a3997bdb6531f3aa0bde3862bde14835d4ccdfd1d5.jpg b/data/2025/2503_12xxx/2503.12937/images/89c21106dadd9e892de897a3997bdb6531f3aa0bde3862bde14835d4ccdfd1d5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9d08384ccfb597c699570d395a99585bbed2af9a --- /dev/null +++ b/data/2025/2503_12xxx/2503.12937/images/89c21106dadd9e892de897a3997bdb6531f3aa0bde3862bde14835d4ccdfd1d5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d0c9754e0de33a4c76a80c4274837dbf489fc2c94d5d2443a4b0cbb322f3750e +size 7536 diff --git a/data/2025/2503_12xxx/2503.12937/images/8f872592d2440c83707b4c948838641a3c2d1471896f5cd20dd8fca83cbb0a62.jpg b/data/2025/2503_12xxx/2503.12937/images/8f872592d2440c83707b4c948838641a3c2d1471896f5cd20dd8fca83cbb0a62.jpg new file mode 100644 index 0000000000000000000000000000000000000000..efc7c3de48c5e6f80325a7ab34f32eb443dbf11e --- /dev/null +++ b/data/2025/2503_12xxx/2503.12937/images/8f872592d2440c83707b4c948838641a3c2d1471896f5cd20dd8fca83cbb0a62.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5a22790f9b4a22d4160ca9a880e4f9b5b2544517b602b177e1c4ae20623d2698 +size 26345 diff --git a/data/2025/2503_12xxx/2503.12937/images/c4dcc9464dd8f5bae7a084695aadece73c6c64879dbae0c0fa3ed1632dd6f628.jpg 
b/data/2025/2503_12xxx/2503.12937/images/c4dcc9464dd8f5bae7a084695aadece73c6c64879dbae0c0fa3ed1632dd6f628.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1902e2054bd63bc843c2cbc3111975d073a1649d --- /dev/null +++ b/data/2025/2503_12xxx/2503.12937/images/c4dcc9464dd8f5bae7a084695aadece73c6c64879dbae0c0fa3ed1632dd6f628.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8842629cc3276791a4e891f38c502d1dcee54d4d1d95283bca8fac66562fdddb +size 19084 diff --git a/data/2025/2503_12xxx/2503.12937/images/cc8691112c299eff8cc7beb85c16c3122f1c04b330a8141f6be8b51d0884c159.jpg b/data/2025/2503_12xxx/2503.12937/images/cc8691112c299eff8cc7beb85c16c3122f1c04b330a8141f6be8b51d0884c159.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2279c1a2d3f6c70327e08a0180479bd20a7e21ae --- /dev/null +++ b/data/2025/2503_12xxx/2503.12937/images/cc8691112c299eff8cc7beb85c16c3122f1c04b330a8141f6be8b51d0884c159.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6025cc2ce17b9f718cc5329854a857dcc2e0ba9549fd03dd7fae8fe6d46d176a +size 9936 diff --git a/data/2025/2503_12xxx/2503.12937/images/dc48c9c847fef2992a2cdf8778ddf43114550130a75e34b67ae5bbaad9c55bea.jpg b/data/2025/2503_12xxx/2503.12937/images/dc48c9c847fef2992a2cdf8778ddf43114550130a75e34b67ae5bbaad9c55bea.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1e7774199c371a5e9e3e1725768bb6dffe6f27ef --- /dev/null +++ b/data/2025/2503_12xxx/2503.12937/images/dc48c9c847fef2992a2cdf8778ddf43114550130a75e34b67ae5bbaad9c55bea.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:16743e03246f448bdcf629028f5661e4c1f97319b1d97137cc5982fc7f2c04c2 +size 937 diff --git a/data/2025/2503_12xxx/2503.12937/images/e5f37c322318e22f5c792d8f69d382aba0a87edb79ed67e1acbc2363b6ff942e.jpg b/data/2025/2503_12xxx/2503.12937/images/e5f37c322318e22f5c792d8f69d382aba0a87edb79ed67e1acbc2363b6ff942e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..017d12a6881b7048be3901330b4c271ca25ca5b4 --- /dev/null +++ b/data/2025/2503_12xxx/2503.12937/images/e5f37c322318e22f5c792d8f69d382aba0a87edb79ed67e1acbc2363b6ff942e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a7784ea92d40f6d73e350fa5357f57f0cbb3bfb63d73d3f8ccd15dc595734c4c +size 5361 diff --git a/data/2025/2503_12xxx/2503.12937/images/f8c2153c05b6d636fd93e2d9701b86feb20c9c13112ae8afc8e353930bce0932.jpg b/data/2025/2503_12xxx/2503.12937/images/f8c2153c05b6d636fd93e2d9701b86feb20c9c13112ae8afc8e353930bce0932.jpg new file mode 100644 index 0000000000000000000000000000000000000000..68dae1d83b58a4b6532dbddfc35641799aee9ac3 --- /dev/null +++ b/data/2025/2503_12xxx/2503.12937/images/f8c2153c05b6d636fd93e2d9701b86feb20c9c13112ae8afc8e353930bce0932.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ffe87d42baba2f4715201d5d9bf2b548441f22838b74731ef275f71f82233dc8 +size 11732 diff --git a/data/2025/2503_12xxx/2503.12937/layout.json b/data/2025/2503_12xxx/2503.12937/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..d9799f60e88896c1b75db1fbd733466e238e9935 --- /dev/null +++ b/data/2025/2503_12xxx/2503.12937/layout.json @@ -0,0 +1,9642 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 78, + 102, + 533, + 140 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 102, + 533, + 140 + ], + "spans": [ + { + "bbox": [ + 78, + 102, + 533, + 140 + ], + "type": "text", + "content": "R1-VL: Learning to 
Reason with Multimodal Large Language Models via Step-wise Group Relative Policy Optimization" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 61, + 167, + 548, + 198 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 167, + 548, + 198 + ], + "spans": [ + { + "bbox": [ + 61, + 167, + 548, + 198 + ], + "type": "text", + "content": "Jingyi Zhang Jiaxing Huang Huanjin Yao Shunyu Liu Xikun Zhang Shijian Lu Dacheng Tao Nanyang Technological University, Singapore" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 152, + 224, + 200, + 237 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 152, + 224, + 200, + 237 + ], + "spans": [ + { + "bbox": [ + 152, + 224, + 200, + 237 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 251, + 296, + 539 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 251, + 296, + 539 + ], + "spans": [ + { + "bbox": [ + 55, + 251, + 296, + 539 + ], + "type": "text", + "content": "Recent studies generally enhance MLLMs' reasoning capabilities via supervised fine-tuning on high-quality chain-of-thought reasoning data, which often leads models to merely imitate successful reasoning paths without understanding what the wrong reasoning paths are. In this work, we aim to enhance the MLLMs' reasoning ability beyond passively imitating positive reasoning paths. To this end, we design Step-wise Group Relative Policy Optimization (StepGRPO), a new online reinforcement learning framework that enables MLLMs to self-improve reasoning ability via simple, effective and dense step-wise rewarding. Specifically, StepGRPO introduces two novel rule-based reasoning rewards: Step-wise Reasoning Accuracy Reward (StepRAR) and Step-wise Reasoning Validity Reward (StepRVR). StepRAR rewards the reasoning paths that contain necessary intermediate reasoning steps via a soft key-step matching technique, while StepRAR rewards reasoning paths that follow a well-structured and logically consistent reasoning process through a reasoning completeness and logic evaluation strategy. With the proposed StepGRPO, we introduce R1-VL, a series of MLLMs with outstanding capabilities in step-by-step reasoning. Extensive experiments over 8 benchmarks demonstrate the superiority of our methods. Code is available at link." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 575, + 135, + 588 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 575, + 135, + 588 + ], + "spans": [ + { + "bbox": [ + 56, + 575, + 135, + 588 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 596, + 295, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 596, + 295, + 693 + ], + "spans": [ + { + "bbox": [ + 55, + 596, + 295, + 693 + ], + "type": "text", + "content": "Multimodal large language models (MLLMs) have achieved significant progress in vision-language understanding [1, 8, 15, 18, 21, 38, 43, 51]. Recent efforts generally enhance MLLMs' reasoning capabilities by employing supervised fine-tuning (SFT) on high-quality chain-of-thought (CoT) reasoning data generated by powerful models (e.g., GPT4) [37, 44, 46, 55]. 
For example, Mulberry [46] introduces CoMCTS, which utilizes multiple" + } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 331, + 224, + 541, + 358 + ], + "blocks": [ + { + "bbox": [ + 331, + 224, + 541, + 358 + ], + "lines": [ + { + "bbox": [ + 331, + 224, + 541, + 358 + ], + "spans": [ + { + "bbox": [ + 331, + 224, + 541, + 358 + ], + "type": "image", + "image_path": "8f872592d2440c83707b4c948838641a3c2d1471896f5cd20dd8fca83cbb0a62.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 369, + 555, + 490 + ], + "lines": [ + { + "bbox": [ + 313, + 369, + 555, + 490 + ], + "spans": [ + { + "bbox": [ + 313, + 369, + 555, + 490 + ], + "type": "text", + "content": "Figure 1. For MLLMs, online reinforcement learning with outcome-level reward, like in Deepseek-R1's GRPO [34], often suffers from sparse reward issues, where only a few reasoning paths can receive positive/high rewards during training, ultimately leading to poor exploration efficiency and unstable learning process. To tackle this, we propose a novel online reinforcement learning framework that incorporates step-wise reasoning rewards in addition to outcome-level rewards, encouraging MLLMs to iteratively refine their reasoning with dense rewards and resulting in a more stable training process and improved reasoning capability. The experiments are conducted on Qwen2-VL-7b over MathVista." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 517, + 555, + 615 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 517, + 555, + 615 + ], + "spans": [ + { + "bbox": [ + 313, + 517, + 555, + 615 + ], + "type": "text", + "content": "models to collectively search and identify effective reasoning paths, followed by SFT on the collected reasoning data. However, SFT approaches focus solely on positive reasoning paths (i.e., those leading to correct answers), while the negative reasoning paths are largely neglected. This limitation may cause the model to merely imitate successful reasoning paths without understanding what the flawed and wrong reasoning paths are." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 617, + 556, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 617, + 556, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 617, + 556, + 715 + ], + "type": "text", + "content": "In this work, we aim to enhance the MLLMs' reasoning ability beyond passively imitating positive reasoning paths. Recent advancements in NLP, such as Deepseek-R1 [13] and Kimi-K1.5 [36], have shown great potential in incentivizing the reasoning capability of LLMs via actively selfexploring. 
The core design of these advances (e.g., GRPO in Deepseek-R1) lies in online reinforcement learning without the need for reward models, which encourages an LLM" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 225, + 37, + 563 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 225, + 37, + 563 + ], + "spans": [ + { + "bbox": [ + 14, + 225, + 37, + 563 + ], + "type": "text", + "content": "arXiv:2503.12937v2 [cs.AI] 4 Aug 2025" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 69, + 702, + 280, + 714 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 702, + 280, + 714 + ], + "spans": [ + { + "bbox": [ + 69, + 702, + 280, + 714 + ], + "type": "text", + "content": "Correspondence to: Jiaxing Huang {jiaxing.huang@ntu.edu.sg}." + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 294, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 294, + 144 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 294, + 144 + ], + "type": "text", + "content": "to generate a group of reasoning paths and iteratively refine its reasoning process by rewarding the generated reasoning paths based on a rule-based reward function. Typically, an outcome-level reward strategy is used: reasoning paths leading to correct answers receive higher rewards, while those leading to incorrect answers receive lower ones." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 146, + 294, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 146, + 294, + 289 + ], + "spans": [ + { + "bbox": [ + 55, + 146, + 294, + 289 + ], + "type": "text", + "content": "An intuitive idea is to directly apply these simple and effective LLM online reinforcement learning methods for MLLMs. However, relying solely on outcome-level rewards, like in Deepseek-R1's GRPO, often suffers from sparse reward issues on MLLM reasoning learning, resulting in suboptimal performance. Specifically, most MLLMs, especially smaller ones, exhibit very limited capability in long-chain reasoning accuracy and validity, whereas only a few MLLM-generated reasoning paths can receive positive/high rewards. This lack of positive reward signals reduces exploration efficiency and leads to an unstable learning process, as illustrated in Fig. 1." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 291, + 294, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 291, + 294, + 422 + ], + "spans": [ + { + "bbox": [ + 55, + 291, + 294, + 422 + ], + "type": "text", + "content": "We propose to tackle this sparse reward issue by introducing dense step-wise reasoning rewards in addition to sparse outcome-level rewards. To this end, we design Stepwise Group Relative Policy Optimization (StepGRPO), a new online reinforcement learning framework that enables MLLMs to self-improve reasoning ability via simple, effective and dense step-wise rewarding while using no additional process reward models. Specifically, StepGRPO introduces two novel rule-based reasoning reward mechanisms: Step-wise Reasoning Accuracy Reward (StepRAR) and Step-wise Reasoning Validity Reward (StepRVR)." 
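To make the sparse-reward problem above concrete, the following minimal Python sketch (our illustration, not the authors' released code) scores a group of sampled reasoning paths with a purely outcome-level rule; the `rollouts` strings and the answer-matching rule are hypothetical:

```python
# Sketch of an outcome-level rule-based reward (GRPO-style): a path earns 1
# only when its final answer equals the ground truth, and 0 otherwise.
def outcome_reward(path: str, gold: str) -> float:
    answer = path.rsplit("answer is", 1)[-1].strip(" .") if "answer is" in path else None
    return 1.0 if answer == gold else 0.0

# Hypothetical rollouts for one question whose ground-truth answer is "3".
# A weak policy rarely finishes correctly, so nearly every reward is 0 and
# the group carries almost no learning signal -- the sparse reward issue.
rollouts = [
    "Step 1: ... The final answer is 5.",
    "Step 1: ... The final answer is 3.",
    "Step 1: ... (no final answer produced)",
]
print([outcome_reward(c, "3") for c in rollouts])  # [0.0, 1.0, 0.0]
```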
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 424, + 295, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 424, + 295, + 639 + ], + "spans": [ + { + "bbox": [ + 55, + 424, + 295, + 639 + ], + "type": "text", + "content": "StepRAR rewards the reasoning path using a soft key-step matching technique that evaluates whether the reasoning path contains key intermediate reasoning steps (i.e., the necessary steps to reach the correct final solution). StepRVR rewards the reasoning path based on a reasoning completeness and logic evaluation method, which assesses whether the reasoning process is well-structured and logically consistent. In this way, StepRAR and StepRVR help mitigate the sparse reward issue by providing informative rewards, even when the reasoning path does not produce the correct final answer – as long as it includes key intermediate reasoning steps or follows a structured and logical reasoning process. With StepRAR and StepRVR, StepGRPO takes the average step-wise reasoning rewards of a group of sampled reasoning paths as a baseline to estimate the advantage for policy optimization. Using the proposed StepGRPO, we develop R1-VL, a series of MLLMs with R1-like step-by-step reasoning capabilities." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 642, + 294, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 642, + 294, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 642, + 294, + 713 + ], + "type": "text", + "content": "The proposed StepGRPO offers two key advantages. 1) Effectiveness. StepGRPO introduces two step-wise reasoning reward mechanisms with group relative optimization, which provide rich and fine-grained step-wise reasoning rewards along the whole reasoning trajectory beyond the final answer. This mitigates the sparse reward issue and encour" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 313, + 72, + 553, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 553, + 156 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 553, + 156 + ], + "type": "text", + "content": "ages more structured, logically consistent reasoning trajectories. 2) Efficiency. StepGRPO achieves step-wise reasoning rewarding in a rule-based manner, which provides step-wise reasoning rewards while eliminating the need for process reward models. This significantly reduces computational overhead while maintaining fine-grained step-wise supervision." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 313, + 157, + 553, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 157, + 553, + 323 + ], + "spans": [ + { + "bbox": [ + 313, + 157, + 553, + 323 + ], + "type": "text", + "content": "The main contributions of this work are fourfold. First, we propose StepGRPO, a new online reinforcement learning framework that enables MLLMs to self-improve reasoning ability via simple, effective and dense step-wise rewarding. Second, we design two novel rule-based reasoning reward mechanisms, i.e., step-wise reasoning accuracy reward and step-wise reasoning validity reward, which effectively mitigate the sparse reward issue for MLLMs without the need for process reward models. Third, with the proposed StepGRPO, we develop R1-VL, a series of MLLMs that have superior reasoning capabilities. Fourth, extensive experiments over multiple benchmarks show that R1-VL achieves superior performance compared with state-of-the-art MLLMs."
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 336, + 400, + 349 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 336, + 400, + 349 + ], + "spans": [ + { + "bbox": [ + 313, + 336, + 400, + 349 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 357, + 504, + 369 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 357, + 504, + 369 + ], + "spans": [ + { + "bbox": [ + 313, + 357, + 504, + 369 + ], + "type": "text", + "content": "2.1. Multimodal Large Language Model" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 375, + 553, + 602 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 375, + 553, + 602 + ], + "spans": [ + { + "bbox": [ + 313, + 375, + 553, + 602 + ], + "type": "text", + "content": "Multimodal Large Language Models (MLLMs) [1, 8, 15, 18, 21, 38, 43, 51, 52] have shown remarkable advancements across a wide range of vision-language understanding tasks, demonstrating their capabilities in comprehending and analyzing visual contents across various application domains. Early research on MLLMs primarily focuses on text generation based on text prompts and input multiple modalities such as images [20, 21, 53], videos [9, 35]. Recent advancements further enhance the capabilities of MLLMs from various aspects. For example, recent models [25, 42] incorporate multimodal inputs and outputs such as video, audio, and point cloud inputs beyond text and images. In addition, some efforts attempt to adapt MLLMs for domain-specific tasks, such as medical image understanding [17, 19, 56] and document analysis [22, 49]. In this work, we focus on enhancing the reasoning ability of MLLMs in tackling complex reasoning tasks and introduce R1-VL, a series of MLLMs that have superior reasoning capability." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 611, + 424, + 624 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 611, + 424, + 624 + ], + "spans": [ + { + "bbox": [ + 313, + 611, + 424, + 624 + ], + "type": "text", + "content": "2.2. MLLM Reasoning" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 629, + 553, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 629, + 553, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 629, + 553, + 715 + ], + "type": "text", + "content": "Inspired by the advances in NLP that show great potential in learning to reason and tackling complex language tasks [29], recent studies attempt to enhance the reasoning capability of MLLM. Generally, current MLLM reasoning methods improve the reasoning capability of MLLM by generating high-quality chain-of-thoughts (CoT) data using powerful model (e.g., GPT-4) and performing supervised" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 294, + 192 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 294, + 192 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 294, + 192 + ], + "type": "text", + "content": "fine-tuning with the collected data [10, 37, 44, 46, 55]. For example, Mulberry [46] introduces Collective Monte Carlo Tree Search (MCTS) into MLLM and proposes CoMCTS which leverages complementary knowledge from multiple models to collaboratively search and identify effective reasoning paths. 
In addition, recent works [14, 27, 30, 47] attempt to explore online reinforcement learning to improve the MLLMs' reasoning ability. Different from these works, we design StepGRPO that enables MLLM to self-improve the reasoning ability with step-wise reward signals." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 199, + 194, + 212 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 199, + 194, + 212 + ], + "spans": [ + { + "bbox": [ + 55, + 199, + 194, + 212 + ], + "type": "text", + "content": "2.3. Reinforcement Learning" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 216, + 295, + 360 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 216, + 295, + 360 + ], + "spans": [ + { + "bbox": [ + 55, + 216, + 295, + 360 + ], + "type": "text", + "content": "Reinforcement Learning (RL) [16] is a fundamental approach in machine learning, where an agent learns to interact with an environment by taking actions, receiving rewards, and updating its policy to maximize the long-term return. With the rise of large language models (LLMs) [4, 28, 31], Reinforcement Learning with Human Feedback (RLHF) [3] has emerged as a key technique for fine-tuning models using human preference data. RLHF leverages algorithms like Proximal Policy Optimization (PPO) [33] and Direct Preference Optimization (DPO) [32] to guide model behavior for improving the alignment, coherence and helpfulness in response generation." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 360, + 295, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 360, + 295, + 635 + ], + "spans": [ + { + "bbox": [ + 55, + 360, + 295, + 635 + ], + "type": "text", + "content": "Recently, RL is increasingly adopted to enhance LLMs' reasoning capabilities [5, 7, 13, 24, 36, 50], especially for mathematical problem solving. The core is to adopt an appropriate reward function or model that evaluates and reinforces high-quality reasoning paths while penalizing low-quality ones, guiding the model's optimization towards more structured and coherent reasoning trajectories using the RL algorithm. For example, ReST-MCTS* [50] trains a process reward model (PRM) for determining the correctness of each reasoning step within reasoning paths. Recent methods have found that using a simple outcome-level rule-based reward function (i.e., the reasoning trajectories leading to correct answer are rewarded with higher score) can already provide an effective and reliable reward signal during the RL process [13, 24, 36]. For example, DeepSeek-R1 [13] demonstrates that group relative policy optimization (GRPO) [34] with outcome-level reward effectively enhances the reasoning capability of LLMs. In this work, we aim for improving the reasoning capability of MLLMs through reinforcement learning and propose StepGRPO, which effectively tackles the sparse reward issue in MLLMs, leading to stable training process and better reasoning capability." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 645, + 111, + 658 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 645, + 111, + 658 + ], + "spans": [ + { + "bbox": [ + 55, + 645, + 111, + 658 + ], + "type": "text", + "content": "3. 
Method" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 665, + 295, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 665, + 295, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 665, + 295, + 714 + ], + "type": "text", + "content": "This section first presents the task formulation, and then introduces the proposed Step-wise Group Relative Policy Optimization (StepGRPO). More details to be elaborated in the ensuing subsections." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 314, + 72, + 421, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 72, + 421, + 83 + ], + "spans": [ + { + "bbox": [ + 314, + 72, + 421, + 83 + ], + "type": "text", + "content": "3.1. Task Formulation" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 89, + 553, + 233 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 89, + 553, + 233 + ], + "spans": [ + { + "bbox": [ + 313, + 89, + 553, + 233 + ], + "type": "text", + "content": "In this paper, we consider a pre-trained MLLM and denote it as a policy model " + }, + { + "bbox": [ + 313, + 89, + 553, + 233 + ], + "type": "inline_equation", + "content": "\\pi_{\\theta}" + }, + { + "bbox": [ + 313, + 89, + 553, + 233 + ], + "type": "text", + "content": ". Given a multimodal question " + }, + { + "bbox": [ + 313, + 89, + 553, + 233 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 313, + 89, + 553, + 233 + ], + "type": "text", + "content": " consisting of an image and a textual task instruction, i.e., " + }, + { + "bbox": [ + 313, + 89, + 553, + 233 + ], + "type": "inline_equation", + "content": "Q = \\{\\text{text}, \\text{image}\\}" + }, + { + "bbox": [ + 313, + 89, + 553, + 233 + ], + "type": "text", + "content": ", the policy model " + }, + { + "bbox": [ + 313, + 89, + 553, + 233 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 313, + 89, + 553, + 233 + ], + "type": "text", + "content": " generates response " + }, + { + "bbox": [ + 313, + 89, + 553, + 233 + ], + "type": "inline_equation", + "content": "\\mathbf{c}" + }, + { + "bbox": [ + 313, + 89, + 553, + 233 + ], + "type": "text", + "content": " with a step-by-step reasoning trajectory. Generally, this process can be formulated as a sequence of next token prediction actions, i.e., " + }, + { + "bbox": [ + 313, + 89, + 553, + 233 + ], + "type": "inline_equation", + "content": "\\mathbf{c} = (a_1, a_2, \\dots, a_t, \\dots, a_T)" + }, + { + "bbox": [ + 313, + 89, + 553, + 233 + ], + "type": "text", + "content": ", where each action " + }, + { + "bbox": [ + 313, + 89, + 553, + 233 + ], + "type": "inline_equation", + "content": "a_t" + }, + { + "bbox": [ + 313, + 89, + 553, + 233 + ], + "type": "text", + "content": " is sampled from the policy model " + }, + { + "bbox": [ + 313, + 89, + 553, + 233 + ], + "type": "inline_equation", + "content": "\\pi_{\\theta}" + }, + { + "bbox": [ + 313, + 89, + 553, + 233 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 89, + 553, + 233 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 313, + 89, + 553, + 233 + ], + "type": "text", + "content": " represents the maximum sequence length. 
After each action, the new state " + }, + { + "bbox": [ + 313, + 89, + 553, + 233 + ], + "type": "inline_equation", + "content": "s_{t+1}" + }, + { + "bbox": [ + 313, + 89, + 553, + 233 + ], + "type": "text", + "content": " is determined by updating the current state " + }, + { + "bbox": [ + 313, + 89, + 553, + 233 + ], + "type": "inline_equation", + "content": "s_t" + }, + { + "bbox": [ + 313, + 89, + 553, + 233 + ], + "type": "text", + "content": " with the newly generated action " + }, + { + "bbox": [ + 313, + 89, + 553, + 233 + ], + "type": "inline_equation", + "content": "a_t" + }, + { + "bbox": [ + 313, + 89, + 553, + 233 + ], + "type": "text", + "content": ", i.e., " + }, + { + "bbox": [ + 313, + 89, + 553, + 233 + ], + "type": "inline_equation", + "content": "s_{t+1} = (s_t, a_t)" + }, + { + "bbox": [ + 313, + 89, + 553, + 233 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 313, + 89, + 553, + 233 + ], + "type": "inline_equation", + "content": "1 \\leq t \\leq T" + }, + { + "bbox": [ + 313, + 89, + 553, + 233 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 233, + 554, + 353 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 233, + 554, + 353 + ], + "spans": [ + { + "bbox": [ + 313, + 233, + 554, + 353 + ], + "type": "text", + "content": "Considering this formulation, the objective of our task is to optimize the policy model " + }, + { + "bbox": [ + 313, + 233, + 554, + 353 + ], + "type": "inline_equation", + "content": "\\pi_{\\theta}" + }, + { + "bbox": [ + 313, + 233, + 554, + 353 + ], + "type": "text", + "content": " such that it can select better actions based on the previous states, thereby improving reasoning quality. In the context of reinforcement learning (RL), the policy model is generally optimized by maximizing the cumulative reward, where the reward for taking action " + }, + { + "bbox": [ + 313, + 233, + 554, + 353 + ], + "type": "inline_equation", + "content": "a_{t}" + }, + { + "bbox": [ + 313, + 233, + 554, + 353 + ], + "type": "text", + "content": " at state " + }, + { + "bbox": [ + 313, + 233, + 554, + 353 + ], + "type": "inline_equation", + "content": "s_t" + }, + { + "bbox": [ + 313, + 233, + 554, + 353 + ], + "type": "text", + "content": " is denoted as " + }, + { + "bbox": [ + 313, + 233, + 554, + 353 + ], + "type": "inline_equation", + "content": "r(s_t,a_t,s_{t + 1})" + }, + { + "bbox": [ + 313, + 233, + 554, + 353 + ], + "type": "text", + "content": ". Following prior studies [46], we define an action in this paper as generating a reasoning step, which consists of one or more sentences containing multiple word tokens." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 359, + 552, + 372 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 359, + 552, + 372 + ], + "spans": [ + { + "bbox": [ + 313, + 359, + 552, + 372 + ], + "type": "text", + "content": "3.2. 
Step-wise Group Relative Policy Optimization" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 376, + 554, + 484 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 376, + 554, + 484 + ], + "spans": [ + { + "bbox": [ + 313, + 376, + 554, + 484 + ], + "type": "text", + "content": "We propose Step-wise Group Relative Policy Optimization (StepGRPO), a novel online reinforcement fine-tuning framework that mitigates the sparse reward issue for MLLMs and encourages self-improvement in reasoning ability through simple, effective and dense step-wise reward mechanisms. As illustrated in Fig. 2, StepGRPO consists of two phases: (1) a policy warm-up phase and (2) a step-wise online policy optimization phase. The overall algorithm is shown in Algorithm 1." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 491, + 411, + 502 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 491, + 411, + 502 + ], + "spans": [ + { + "bbox": [ + 313, + 491, + 411, + 502 + ], + "type": "text", + "content": "3.2.1. Policy Warm-up" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 506, + 553, + 602 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 506, + 553, + 602 + ], + "spans": [ + { + "bbox": [ + 313, + 506, + 553, + 602 + ], + "type": "text", + "content": "This phase equips the policy model with fundamental reasoning capabilities, ensuring it can generate proper stepwise reasoning paths before reinforcement learning. During the warm-up phase, the policy model is fine-tuned using a multimodal dataset " + }, + { + "bbox": [ + 313, + 506, + 553, + 602 + ], + "type": "inline_equation", + "content": "D_{s}" + }, + { + "bbox": [ + 313, + 506, + 553, + 602 + ], + "type": "text", + "content": " with Chain-of-Thought (CoT) reasoning path, where each data consists of a multimodal question " + }, + { + "bbox": [ + 313, + 506, + 553, + 602 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 313, + 506, + 553, + 602 + ], + "type": "text", + "content": " and a step-by-step reasoning path " + }, + { + "bbox": [ + 313, + 506, + 553, + 602 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 313, + 506, + 553, + 602 + ], + "type": "text", + "content": ", i.e., " + }, + { + "bbox": [ + 313, + 506, + 553, + 602 + ], + "type": "inline_equation", + "content": "D_{s} = \\{Q^{n}, \\tau^{n}\\}_{n=1}^{N}" + }, + { + "bbox": [ + 313, + 506, + 553, + 602 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 344, + 609, + 553, + 643 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 344, + 609, + 553, + 643 + ], + "spans": [ + { + "bbox": [ + 344, + 609, + 553, + 643 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {w a r m - u p}} = - \\mathbb {E} _ {\\tau \\sim D _ {s}} [ \\sum_ {t = 1} ^ {T} \\log (\\pi_ {\\theta} (a _ {t} | s _ {t})) ]. \\tag {1}", + "image_path": "31ea3d8e55752229dfade1dd6643121292be3f7cec00e13905b27dd70e12926c.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 651, + 501, + 662 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 651, + 501, + 662 + ], + "spans": [ + { + "bbox": [ + 313, + 651, + 501, + 662 + ], + "type": "text", + "content": "3.2.2. 
Step-wise Online Policy Optimization" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 665, + 553, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 665, + 553, + 714 + ], + "spans": [ + { + "bbox": [ + 313, + 665, + 553, + 714 + ], + "type": "text", + "content": "This phase enables MLLMs to self-improve their reasoning ability via online reinforcement learning, mitigating the sparse reward issue through step-wise reasoning rewards. As illustrated in Fig. 2, for each question " + }, + { + "bbox": [ + 313, + 665, + 553, + 714 + ], + "type": "inline_equation", + "content": "Q \\in D_{s}" + }, + { + "bbox": [ + 313, + 665, + 553, + 714 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 61, + 70, + 126, + 116 + ], + "blocks": [ + { + "bbox": [ + 61, + 70, + 126, + 116 + ], + "lines": [ + { + "bbox": [ + 61, + 70, + 126, + 116 + ], + "spans": [ + { + "bbox": [ + 61, + 70, + 126, + 116 + ], + "type": "image", + "image_path": "505609ce5b30e24850e3d0b33b9faa0f2d7fbcfed05b7deb464216876e31c18e.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 76, + 242, + 114 + ], + "lines": [ + { + "bbox": [ + 130, + 76, + 242, + 114 + ], + "spans": [ + { + "bbox": [ + 130, + 76, + 242, + 114 + ], + "type": "text", + "content": "Question: In the given diagram, triangle ABC has AD as its median and point E is the midpoint of AD. If the area of triangle ABC is 12, what is the area of triangle ABE?" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 61, + 121, + 547, + 244 + ], + "blocks": [ + { + "bbox": [ + 259, + 75, + 542, + 114 + ], + "lines": [ + { + "bbox": [ + 259, + 75, + 542, + 114 + ], + "spans": [ + { + "bbox": [ + 259, + 75, + 542, + 114 + ], + "type": "text", + "content": "Answer: Step 1: Since AD is a median, it divides triangle ABC into two equal areas: ABD and ACD. Step 2: Segment AE is half of AD, splitting triangle ABD into two triangles of equal area: ABE and BED. Step 3: The area of triangle ABD is half of triangle ABC, which is " + }, + { + "bbox": [ + 259, + 75, + 542, + 114 + ], + "type": "inline_equation", + "content": "\\frac{12}{2} = 6" + }, + { + "bbox": [ + 259, + 75, + 542, + 114 + ], + "type": "text", + "content": ". Step 4: Since E is the midpoint of AD, triangle ABE is half of triangle ABD. Therefore, the area of triangle ABE is " + }, + { + "bbox": [ + 259, + 75, + 542, + 114 + ], + "type": "inline_equation", + "content": "\\frac{6}{2} = 3" + }, + { + "bbox": [ + 259, + 75, + 542, + 114 + ], + "type": "text", + "content": ". The final answer is 3."
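To make the warm-up objective in Eq. 1 concrete, here is a minimal sketch (the shapes and PyTorch usage are our assumptions, not the released implementation) of the negative log-likelihood over the tokens of a CoT reasoning path:

```python
import torch
import torch.nn.functional as F

def warmup_loss(logits: torch.Tensor, target_ids: torch.Tensor) -> torch.Tensor:
    """Eq. 1: minimize -sum_t log pi_theta(a_t | s_t) over a CoT path.

    logits: (T, vocab_size) scores per position; target_ids: (T,) gold tokens.
    """
    log_probs = F.log_softmax(logits, dim=-1)
    token_logp = log_probs.gather(-1, target_ids.unsqueeze(-1)).squeeze(-1)
    return -token_logp.sum()  # Eq. 1 sums over the T actions of the path

# Toy check with random logits for an 8-token reasoning path.
loss = warmup_loss(torch.randn(8, 100), torch.randint(0, 100, (8,)))
```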
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 61, + 121, + 547, + 244 + ], + "lines": [ + { + "bbox": [ + 61, + 121, + 547, + 244 + ], + "spans": [ + { + "bbox": [ + 61, + 121, + 547, + 244 + ], + "type": "image", + "image_path": "7ad59bbf786298ad029c17f7fc43fbbfc0ac2a40931846c3527455d40fe2fdb1.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 61, + 248, + 200, + 258 + ], + "lines": [ + { + "bbox": [ + 61, + 248, + 200, + 258 + ], + "spans": [ + { + "bbox": [ + 61, + 248, + 200, + 258 + ], + "type": "text", + "content": "(a) Step-wise Reasoning Accuracy Reward" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 54, + 349, + 555, + 427 + ], + "lines": [ + { + "bbox": [ + 54, + 349, + 555, + 427 + ], + "spans": [ + { + "bbox": [ + 54, + 349, + 555, + 427 + ], + "type": "text", + "content": "Figure 2. Overview of the proposed StepGRPO. StepGRPO consists of two phases: a policy warm-up phase and a step-wise online policy optimization phase. After the warm-up, the policy model " + }, + { + "bbox": [ + 54, + 349, + 555, + 427 + ], + "type": "inline_equation", + "content": "\\pi_{\\theta}" + }, + { + "bbox": [ + 54, + 349, + 555, + 427 + ], + "type": "text", + "content": " generates a group of reasoning paths " + }, + { + "bbox": [ + 54, + 349, + 555, + 427 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}^i\\}_{i=1}^M" + }, + { + "bbox": [ + 54, + 349, + 555, + 427 + ], + "type": "text", + "content": " and assigns step-wise rewards using two proposed mechanisms: Step-wise Reasoning Accuracy Reward (StepRAR) and Step-wise Reasoning Validity Reward (StepRVR). StepRAR rewards reasoning paths that contain key intermediate steps, identified using a soft key-step matching technique. StepRVR rewards reasoning paths based on completeness and logical consistency, ensuring they are well-structured. StepGRPO then estimates the advantage " + }, + { + "bbox": [ + 54, + 349, + 555, + 427 + ], + "type": "inline_equation", + "content": "\\hat{A}" + }, + { + "bbox": [ + 54, + 349, + 555, + 427 + ], + "type": "text", + "content": " for policy optimization by using the average step-wise reasoning reward of a group of sampled reasoning paths as a baseline. Examples for StepRAR and StepRVR are illustrated in (a) and (b), respectively." + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 65, + 262, + 159, + 281 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 262, + 159, + 281 + ], + "spans": [ + { + "bbox": [ + 65, + 262, + 159, + 281 + ], + "type": "text", + "content": "Pre-extracted key steps with Augmentations:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 64, + 283, + 159, + 333 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 64, + 283, + 159, + 290 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 64, + 283, + 159, + 290 + ], + "spans": [ + { + "bbox": [ + 64, + 283, + 159, + 290 + ], + "type": "text", + "content": "1. AD is a median; median is " + }, + { + "bbox": [ + 64, + 283, + 159, + 290 + ], + "type": "inline_equation", + "content": "AD" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 64, + 292, + 112, + 299 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 64, + 292, + 112, + 299 + ], + "spans": [ + { + "bbox": [ + 64, + 292, + 112, + 299 + ], + "type": "text", + "content": "2. 
equal area; ..." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 64, + 300, + 156, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 64, + 300, + 156, + 308 + ], + "spans": [ + { + "bbox": [ + 64, + 300, + 156, + 308 + ], + "type": "text", + "content": "3. AE is half of AD; " + }, + { + "bbox": [ + 64, + 300, + 156, + 308 + ], + "type": "inline_equation", + "content": "AE = 1 / 2AD" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 64, + 308, + 156, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 64, + 308, + 156, + 316 + ], + "spans": [ + { + "bbox": [ + 64, + 308, + 156, + 316 + ], + "type": "text", + "content": "4. frac{12}{2} {2} = 6; " + }, + { + "bbox": [ + 64, + 308, + 156, + 316 + ], + "type": "inline_equation", + "content": "\\underline{12 / 2} = 6,\\dots" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 64, + 317, + 129, + 324 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 64, + 317, + 129, + 324 + ], + "spans": [ + { + "bbox": [ + 64, + 317, + 129, + 324 + ], + "type": "text", + "content": "5. E is the midpoint; .." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 64, + 325, + 148, + 333 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 64, + 325, + 148, + 333 + ], + "spans": [ + { + "bbox": [ + 64, + 325, + 148, + 333 + ], + "type": "text", + "content": "6. frac{6}{2} = 3; 6/2 = 3. ..." + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 168, + 262, + 248, + 271 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 168, + 262, + 248, + 271 + ], + "spans": [ + { + "bbox": [ + 168, + 262, + 248, + 271 + ], + "type": "text", + "content": "Soft key-step matching :" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 168, + 271, + 318, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 168, + 271, + 318, + 335 + ], + "spans": [ + { + "bbox": [ + 168, + 271, + 318, + 335 + ], + "type": "text", + "content": "Description: The image shows ...; #Rationale: The question asks for the area...; #Step1: ... we find AD is a median of ...; #Step2: ... AE splits triangle ABD ...; #Step3: ... The area of triangle ABD is " + }, + { + "bbox": [ + 168, + 271, + 318, + 335 + ], + "type": "inline_equation", + "content": "12/2 = 6" + }, + { + "bbox": [ + 168, + 271, + 318, + 335 + ], + "type": "text", + "content": ", ..., and the area of triangle ABE is frac{6}{2} = 3. #The final answer is: 3. 
Step-wise Matching score: 3/6" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 323, + 249, + 457, + 258 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 323, + 249, + 457, + 258 + ], + "spans": [ + { + "bbox": [ + 323, + 249, + 457, + 258 + ], + "type": "text", + "content": "(b) Step-wise Reasoning Validity Reward" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 326, + 262, + 542, + 272 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 262, + 542, + 272 + ], + "spans": [ + { + "bbox": [ + 326, + 262, + 542, + 272 + ], + "type": "text", + "content": "Description " + }, + { + "bbox": [ + 326, + 262, + 542, + 272 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 326, + 262, + 542, + 272 + ], + "type": "text", + "content": " #Rationale " + }, + { + "bbox": [ + 326, + 262, + 542, + 272 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 326, + 262, + 542, + 272 + ], + "type": "text", + "content": " # Step1 " + }, + { + "bbox": [ + 326, + 262, + 542, + 272 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 326, + 262, + 542, + 272 + ], + "type": "text", + "content": " ... " + }, + { + "bbox": [ + 326, + 262, + 542, + 272 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 326, + 262, + 542, + 272 + ], + "type": "text", + "content": " #Step " + }, + { + "bbox": [ + 326, + 262, + 542, + 272 + ], + "type": "inline_equation", + "content": "N\\rightarrow" + }, + { + "bbox": [ + 326, + 262, + 542, + 272 + ], + "type": "text", + "content": " #Answer." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 326, + 274, + 409, + 282 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 274, + 409, + 282 + ], + "spans": [ + { + "bbox": [ + 326, + 274, + 409, + 282 + ], + "type": "text", + "content": "i. Reasoning completeness" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 326, + 283, + 537, + 293 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 283, + 537, + 293 + ], + "spans": [ + { + "bbox": [ + 326, + 283, + 537, + 293 + ], + "type": "text", + "content": "Description " + }, + { + "bbox": [ + 326, + 283, + 537, + 293 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 326, + 283, + 537, + 293 + ], + "type": "text", + "content": " #Rationale " + }, + { + "bbox": [ + 326, + 283, + 537, + 293 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 326, + 283, + 537, + 293 + ], + "type": "text", + "content": " #Answer. Missing reasoning steps" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 326, + 293, + 548, + 304 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 293, + 548, + 304 + ], + "spans": [ + { + "bbox": [ + 326, + 293, + 548, + 304 + ], + "type": "text", + "content": "Description " + }, + { + "bbox": [ + 326, + 293, + 548, + 304 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 326, + 293, + 548, + 304 + ], + "type": "text", + "content": " # Step1 " + }, + { + "bbox": [ + 326, + 293, + 548, + 304 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 326, + 293, + 548, + 304 + ], + "type": "text", + "content": " ... 
" + }, + { + "bbox": [ + 326, + 293, + 548, + 304 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 326, + 293, + 548, + 304 + ], + "type": "text", + "content": " #Step " + }, + { + "bbox": [ + 326, + 293, + 548, + 304 + ], + "type": "inline_equation", + "content": "N\\rightarrow" + }, + { + "bbox": [ + 326, + 293, + 548, + 304 + ], + "type": "text", + "content": " #Answer. Missing rationale" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 326, + 306, + 385, + 315 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 306, + 385, + 315 + ], + "spans": [ + { + "bbox": [ + 326, + 306, + 385, + 315 + ], + "type": "text", + "content": "ii. Reasoning logic" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 326, + 315, + 542, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 315, + 542, + 335 + ], + "spans": [ + { + "bbox": [ + 326, + 315, + 542, + 335 + ], + "type": "text", + "content": "Description " + }, + { + "bbox": [ + 326, + 315, + 542, + 335 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 326, + 315, + 542, + 335 + ], + "type": "text", + "content": " #Rationale " + }, + { + "bbox": [ + 326, + 315, + 542, + 335 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 326, + 315, + 542, + 335 + ], + "type": "text", + "content": " #Answer " + }, + { + "bbox": [ + 326, + 315, + 542, + 335 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 326, + 315, + 542, + 335 + ], + "type": "text", + "content": " #Step1... " + }, + { + "bbox": [ + 326, + 315, + 542, + 335 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 326, + 315, + 542, + 335 + ], + "type": "text", + "content": " #StepN. X \n#Description " + }, + { + "bbox": [ + 326, + 315, + 542, + 335 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 326, + 315, + 542, + 335 + ], + "type": "text", + "content": " #Step3 " + }, + { + "bbox": [ + 326, + 315, + 542, + 335 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 326, + 315, + 542, + 335 + ], + "type": "text", + "content": " #Rationale " + }, + { + "bbox": [ + 326, + 315, + 542, + 335 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 326, + 315, + 542, + 335 + ], + "type": "text", + "content": " ... 
" + }, + { + "bbox": [ + 326, + 315, + 542, + 335 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 326, + 315, + 542, + 335 + ], + "type": "text", + "content": " #Step I " + }, + { + "bbox": [ + 326, + 315, + 542, + 335 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 326, + 315, + 542, + 335 + ], + "type": "text", + "content": " #Answer X" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 54, + 448, + 295, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 448, + 295, + 555 + ], + "spans": [ + { + "bbox": [ + 54, + 448, + 295, + 555 + ], + "type": "text", + "content": "the policy model " + }, + { + "bbox": [ + 54, + 448, + 295, + 555 + ], + "type": "inline_equation", + "content": "\\pi_{\\theta}" + }, + { + "bbox": [ + 54, + 448, + 295, + 555 + ], + "type": "text", + "content": " first generates a group of " + }, + { + "bbox": [ + 54, + 448, + 295, + 555 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 54, + 448, + 295, + 555 + ], + "type": "text", + "content": " reasoning trajectories via multiple rollouts, i.e., " + }, + { + "bbox": [ + 54, + 448, + 295, + 555 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}^i\\}_{i=1}^M" + }, + { + "bbox": [ + 54, + 448, + 295, + 555 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 54, + 448, + 295, + 555 + ], + "type": "inline_equation", + "content": "\\mathbf{c}^i = (a_1^i, a_2^i, \\ldots, a_t^i, \\ldots, a_T^i)" + }, + { + "bbox": [ + 54, + 448, + 295, + 555 + ], + "type": "text", + "content": ". After obtaining a group of " + }, + { + "bbox": [ + 54, + 448, + 295, + 555 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 54, + 448, + 295, + 555 + ], + "type": "text", + "content": " reasoning trajectories, we employ our proposed step-wise reasoning rewards to evaluate and reward each generated reasoning trajectory. Specifically, we introduce two types of rule-based step-wise rewards, i.e., step-wise reasoning accuracy (StepRAR) reward and step-wise reasoning validity reward (StepRVR)." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 54, + 558, + 295, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 558, + 295, + 713 + ], + "spans": [ + { + "bbox": [ + 54, + 558, + 295, + 713 + ], + "type": "text", + "content": "Step-wise reasoning accuracy reward (StepRAR) reduces the effect of learning from sparse reward by additionally rewarding reasoning paths that contain correct intermediate reasoning steps contributing to the final solution. Specifically, for each question " + }, + { + "bbox": [ + 54, + 558, + 295, + 713 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 54, + 558, + 295, + 713 + ], + "type": "text", + "content": ", we pre-extract a set of key reasoning steps " + }, + { + "bbox": [ + 54, + 558, + 295, + 713 + ], + "type": "inline_equation", + "content": "\\mathbf{v} = \\{v_{1}, v_{2}, \\ldots\\}" + }, + { + "bbox": [ + 54, + 558, + 295, + 713 + ], + "type": "text", + "content": " from the corresponding reasoning path " + }, + { + "bbox": [ + 54, + 558, + 295, + 713 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 54, + 558, + 295, + 713 + ], + "type": "text", + "content": " in dataset " + }, + { + "bbox": [ + 54, + 558, + 295, + 713 + ], + "type": "inline_equation", + "content": "D_{s}" + }, + { + "bbox": [ + 54, + 558, + 295, + 713 + ], + "type": "text", + "content": ". 
We define key steps as the essential variables and equations that directly contribute to the final solution, and prompt GPT-4 to extract several key steps from the reasoning path for each question. To ensure efficient reward assignment, we refine the extracted steps by removing redundant content and retaining only the core few words necessary for reasoning. Furthermore, we" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 313, + 448, + 555, + 519 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 448, + 555, + 519 + ], + "spans": [ + { + "bbox": [ + 313, + 448, + 555, + 519 + ], + "type": "text", + "content": "augment each extracted key step into multiple equivalent formats to allow more flexible and accurate matching, preventing missed matches due to math-related formatting differences. For example, a mathematical expression such as \"" + }, + { + "bbox": [ + 313, + 448, + 555, + 519 + ], + "type": "inline_equation", + "content": "\\frac{6}{3} = 2" + }, + { + "bbox": [ + 313, + 448, + 555, + 519 + ], + "type": "text", + "content": "\" is augmented to \"6/3 = 2\" or \"6 divided by 3 equals 2\"." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 313, + 519, + 555, + 579 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 519, + 555, + 579 + ], + "spans": [ + { + "bbox": [ + 313, + 519, + 555, + 579 + ], + "type": "text", + "content": "With the extracted key reasoning steps " + }, + { + "bbox": [ + 313, + 519, + 555, + 579 + ], + "type": "inline_equation", + "content": "\\mathbf{v} = \\{v_{1}, v_{2}, \\ldots\\}" + }, + { + "bbox": [ + 313, + 519, + 555, + 579 + ], + "type": "text", + "content": " and such a soft matching mechanism, we calculate a match score for each generated reasoning path based on the ratio of matched key steps, i.e., " + }, + { + "bbox": [ + 313, + 519, + 555, + 579 + ], + "type": "inline_equation", + "content": "k^{i} = |\\mathbf{v}_{\\text{match}}| / |\\mathbf{v}|" + }, + { + "bbox": [ + 313, + 519, + 555, + 579 + ], + "type": "text", + "content": ". Then, StepRAR for " + }, + { + "bbox": [ + 313, + 519, + 555, + 579 + ], + "type": "inline_equation", + "content": "1 \\leq t \\leq T" + }, + { + "bbox": [ + 313, + 519, + 555, + 579 + ], + "type": "text", + "content": " is defined as:" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 317, + 586, + 553, + 641 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 586, + 553, + 641 + ], + "spans": [ + { + "bbox": [ + 317, + 586, + 553, + 641 + ], + "type": "interline_equation", + "content": "r_{acc}^{i}\\left(s_{t}, a_{t}, s_{t+1}\\right) = \\left\\{ \\begin{array}{ll} 1 + \\alpha k^{i}, & \\operatorname{ans}\\left(s_{t+1}\\right) = y, \\\\ \\alpha k^{i}, & \\operatorname{ans}\\left(s_{t+1}\\right) \\neq \\text{null}, \\operatorname{ans}\\left(s_{t+1}\\right) \\neq y, \\\\ 0, & \\operatorname{ans}\\left(s_{t+1}\\right) = \\text{null}, \\end{array} \\right. \\tag {2}", + "image_path": "f8c2153c05b6d636fd93e2d9701b86feb20c9c13112ae8afc8e353930bce0932.jpg" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 313, + 642, + 553, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 642, + 553, + 666 + ], + "spans": [ + { + "bbox": [ + 313, + 642, + 553, + 666 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 313, + 642, + 553, + 666 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 313, + 642, + 553, + 666 + ], + "type": "text", + "content": " is the ground-truth answer extracted from the CoT reasoning path." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 313, + 666, + 554, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 666, + 554, + 714 + ], + "spans": [ + { + "bbox": [ + 313, + 666, + 554, + 714 + ], + "type": "text", + "content": "By leveraging pre-extracted key reasoning steps, StepRAR efficiently provides additional supervision with a simple soft matching mechanism, ensuring the model learns meaningful reasoning processes instead of guessing" + } + ] + } + ], + "index": 29 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 133, + 83 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 133, + 83 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 133, + 83 + ], + "type": "text", + "content": "answers randomly." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 84, + 296, + 192 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 84, + 296, + 192 + ], + "spans": [ + { + "bbox": [ + 55, + 84, + 296, + 192 + ], + "type": "text", + "content": "Step-wise reasoning validity reward (StepRVR) aims to ensure that the generated paths adhere to a logically structured and coherent progression beyond the reasoning accuracy. Prior studies [44, 46] have demonstrated that structural reasoning, such as problem decomposition and progressive reasoning, facilitates more accurate and interpretable reasoning processes, as it encourages models to break down complex problems into multiple intermediate steps rather than direct answer generation." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 194, + 296, + 349 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 194, + 296, + 349 + ], + "spans": [ + { + "bbox": [ + 55, + 194, + 296, + 349 + ], + "type": "text", + "content": "Inspired by these findings, we incorporate step-wise reasoning validity to reinforce well-organized reasoning paths that follow an expected logical flow. Specifically, we define StepRVR using two key criteria: reasoning completeness " + }, + { + "bbox": [ + 55, + 194, + 296, + 349 + ], + "type": "inline_equation", + "content": "\\delta^c" + }, + { + "bbox": [ + 55, + 194, + 296, + 349 + ], + "type": "text", + "content": " and reasoning logic " + }, + { + "bbox": [ + 55, + 194, + 296, + 349 + ], + "type": "inline_equation", + "content": "\\delta^l" + }, + { + "bbox": [ + 55, + 194, + 296, + 349 + ], + "type": "text", + "content": ". Reasoning completeness requires the response to include three essential components, i.e., a background analysis involving image description and rationale analysis to establish context, a step-by-step reasoning process and a final answer. 
In addition to the reasoning completeness, reasoning logic ensures that the reasoning path follows a logical progression, where the background analysis must come before solution steps and the final answer should only appear after reasoning steps are complete." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 350, + 256, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 350, + 256, + 361 + ], + "spans": [ + { + "bbox": [ + 67, + 350, + 256, + 361 + ], + "type": "text", + "content": "With these two criteria, we define StepRVR as" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 59, + 372, + 296, + 413 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 372, + 296, + 413 + ], + "spans": [ + { + "bbox": [ + 59, + 372, + 296, + 413 + ], + "type": "interline_equation", + "content": "r_{val}^{i}\\left(s_{t}, a_{t}, s_{t+1}\\right) = \\left\\{ \\begin{array}{ll} 1, & \\mathbb{I}\\left(\\delta^{c}\\left(s_{t+1}\\right)\\right) \\cdot \\mathbb{I}\\left(\\delta^{l}\\left(s_{t+1}\\right)\\right) = 1, \\\\ 0, & \\text{otherwise}, \\end{array} \\right. \\tag {3}", + "image_path": "6b3c67a4c50ce11940655a5fb86d1d6562af7aedeea159567fe508f24e38ba79.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 414, + 296, + 474 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 414, + 296, + 474 + ], + "spans": [ + { + "bbox": [ + 55, + 414, + 296, + 474 + ], + "type": "text", + "content": "where the reasoning trajectory is rewarded only if it satisfies both completeness and logical coherence. By enforcing this, StepRVR helps the model produce structured, interpretable and logically sound reasoning trajectories, enhancing both the quality and reliability of generated responses." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 475, + 296, + 535 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 475, + 296, + 535 + ], + "spans": [ + { + "bbox": [ + 55, + 475, + 296, + 535 + ], + "type": "text", + "content": "Optimization with the step-wise rewards. After obtaining two types of step-wise rewards, we compute the overall reward for each reasoning path as " + }, + { + "bbox": [ + 55, + 475, + 296, + 535 + ], + "type": "inline_equation", + "content": "r^i = r_{acc}^i + r_{val}^i" + }, + { + "bbox": [ + 55, + 475, + 296, + 535 + ], + "type": "text", + "content": ", and repeatedly compute the rewards for all generated reasoning paths, i.e., " + }, + { + "bbox": [ + 55, + 475, + 296, + 535 + ], + "type": "inline_equation", + "content": "\\{r^1, r^2, \\dots, r^M\\}" + }, + { + "bbox": [ + 55, + 475, + 296, + 535 + ], + "type": "text", + "content": "."
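A minimal sketch of the two rule-based rewards and their combination follows; the section-marker strings, the augmented key-step formats and `alpha` are illustrative assumptions, not the paper's exact matching rules:

```python
def step_rar(path: str, key_steps: list[list[str]], answer: str | None,
             gold: str, alpha: float = 0.1) -> float:
    # Eq. 2 with soft key-step matching: k is the ratio of key steps whose
    # augmented equivalent formats (e.g., "\\frac{12}{2} = 6" vs "12/2 = 6")
    # appear anywhere in the generated path.
    k = sum(any(fmt in path for fmt in variants) for variants in key_steps) / len(key_steps)
    if answer is None:          # no parsable final answer
        return 0.0
    return 1.0 + alpha * k if answer == gold else alpha * k

def step_rvr(path: str) -> float:
    # Eq. 3: reward 1 only if the path is complete (description/rationale,
    # at least one step, a final answer) AND these parts occur in logical order.
    positions = [path.find(m) for m in ("#Description", "#Rationale", "#Step", "#Answer")]
    complete = all(p >= 0 for p in positions)
    ordered = positions == sorted(positions)
    return 1.0 if complete and ordered else 0.0

# Overall step-wise reward for one sampled path: r^i = r_acc^i + r_val^i.
path = "#Description ... #Rationale ... #Step1: 12/2 = 6 ... #Answer: 3"
r = step_rar(path, [["12/2 = 6", "\\frac{12}{2} = 6"]], answer="3", gold="3") + step_rvr(path)
print(r)  # 2.1 -> correct answer (1.0) + alpha*k (0.1) + valid structure (1.0)
```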
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 536, + 295, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 536, + 295, + 559 + ], + "spans": [ + { + "bbox": [ + 55, + 536, + 295, + 559 + ], + "type": "text", + "content": "To estimate the advantage of each reasoning trajectory, we normalize its reward relative to the group as follows:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 102, + 568, + 295, + 597 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 102, + 568, + 295, + 597 + ], + "spans": [ + { + "bbox": [ + 102, + 568, + 295, + 597 + ], + "type": "interline_equation", + "content": "\\hat{A}^{i} = \\frac{r^{i} - \\operatorname{mean}\\left(\\left\\{r^{1}, r^{2}, \\dots, r^{M}\\right\\}\\right)}{\\operatorname{std}\\left(\\left\\{r^{1}, r^{2}, \\dots, r^{M}\\right\\}\\right)}, \\tag {4}", + "image_path": "89c21106dadd9e892de897a3997bdb6531f3aa0bde3862bde14835d4ccdfd1d5.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 55, + 608, + 296, + 657 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 608, + 296, + 657 + ], + "spans": [ + { + "bbox": [ + 55, + 608, + 296, + 657 + ], + "type": "text", + "content": "where the mean group reward serves as the baseline, and " + }, + { + "bbox": [ + 55, + 608, + 296, + 657 + ], + "type": "inline_equation", + "content": "\\hat{A}^{i}" + }, + { + "bbox": [ + 55, + 608, + 296, + 657 + ], + "type": "text", + "content": " measures how much better or worse " + }, + { + "bbox": [ + 55, + 608, + 296, + 657 + ], + "type": "inline_equation", + "content": "r^{i}" + }, + { + "bbox": [ + 55, + 608, + 296, + 657 + ], + "type": "text", + "content": " is compared to other reasoning trajectories within the group. Following this, we optimize the policy model with the loss defined as:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 71, + 667, + 295, + 715 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 667, + 295, + 715 + ], + "spans": [ + { + "bbox": [ + 71, + 667, + 295, + 715 + ], + "type": "interline_equation", + "content": "\\mathcal{L}_{\\text{StepRL}} = -\\underset{Q \\in D_{s}}{\\mathbb{E}} \\left[ \\frac{1}{M} \\sum_{i=1}^{M} \\left( \\frac{\\pi_{\\theta}\\left(\\mathbf{c}^{i} \\mid Q\\right)}{\\left[ \\pi_{\\theta}\\left(\\mathbf{c}^{i} \\mid Q\\right) \\right]_{\\text{nograd}}} \\hat{A}^{i} - \\beta D_{KL}\\left(\\pi_{\\theta} \\,\\|\\, \\pi_{ref}\\right) \\right) \\right], \\tag {5}", + "image_path": "1558e8c3b9e8da8ef6634dfd8880bbdd56fd4f2cc1fe7590b5502e41bf971d92.jpg" + } + ] + } + ], + "index": 10 + }, + { + "type": "code", + "bbox": [ + 316, + 86, + 553, + 295 + ], + "blocks": [ + { + "bbox": [ + 315, + 72, + 553, + 84 + ], + "lines": [ + { + "bbox": [ + 315, + 72, + 553, + 84 + ], + "spans": [ + { + "bbox": [ + 315, + 72, + 553, + 84 + ], + "type": "text", + "content": "Algorithm 1 Step-wise Group Relative Policy Optimization" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 316, + 86, + 553, + 295 + ], + "lines": [ + { + "bbox": [ + 316, + 86, + 553, + 295 + ], + "spans": [ + { + "bbox": [ + 316, + 86, + 553, + 295 + ], + "type": "text", + "content": "Input: Policy model " + }, + { + "bbox": [ + 316, + 86, + 553, + 295 + ], + "type": "inline_equation", + "content": "\\pi_{\\theta}" + }, + { + "bbox": [ + 316, + 86, + 553, + 295 + ], + "type": "text", + "content": " initialized by a pre-trained \nMLLM; a multimodal dataset " + }, + { + "bbox": [ + 316, + 86, + 553, + 295 + ], + "type": "inline_equation", + "content": "D_{s} = \\{Q^{n},\\tau^{n}\\}_{n = 1}^{N}" + }, + { + "bbox": [ + 316, + 86, + 553, + 295 + ], + "type": "text", + "content": " \nOutput: Trained policy model " + }, + { + "bbox": [ + 316, + 86, + 553, + 295 + ], + "type": "inline_equation", + "content": "\\pi_{\\theta}" + }, + { + "bbox": [ + 316, + 86, + 553, + 295 + ], + "type": "text", + "content": " \nPolicy warm-up: \nfor iter " + }, + { + "bbox": [ + 316, + 86, + 553, + 295 + ], + "type": "inline_equation", + "content": "= 1" + }, + { + "bbox": [ + 316, + 86, + 553, + 295 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 316, + 86, + 553, + 295 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 316, + 86, + 553, + 295 + ], + "type": "text", + "content": " do \nSample " + }, + { + "bbox": [ + 316, + 86, + 553, + 295 + ], + "type": "inline_equation", + "content": "\\{Q,\\tau \\} \\in D_s" + }, + { + "bbox": [ + 316, + 86, + 553, + 295 + ], + "type": "text", + "content": " \nOptimize policy model " + }, + { + "bbox": [ + 316, + 86, + 553, + 295 + ], + "type": "inline_equation", + "content": "\\pi_{\\theta}" + }, + { + "bbox": [ + 316, + 86, + 553, + 295 + ], + "type": "text", + "content": " by Eq. 1 \nend for \nStep-wise online policy optimization: \nfor iter " + }, + { + "bbox": [ + 316, + 86, + 553, + 295 + ], + "type": "inline_equation", + "content": "= 1" + }, + { + "bbox": [ + 316, + 86, + 553, + 295 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 316, + 86, + 553, + 295 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 316, + 86, + 553, + 295 + ], + "type": "text", + "content": " do \nSample " + }, + { + "bbox": [ + 316, + 86, + 553, + 295 + ], + "type": "inline_equation", + "content": "\\{Q,\\tau \\} \\in D_s" + }, + { + "bbox": [ + 316, + 86, + 553, + 295 + ], + "type": "text", + "content": " \nGenerate a group of reasoning paths " + }, + { + "bbox": [ + 316, + 86, + 553, + 295 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{c}^i\\}_{i = 1}^M\\sim \\pi_\\theta" + }, + { + "bbox": [ + 316, + 86, + 553, + 295 + ], + "type": "text", + "content": " \nObtain step-wise rewards " + }, + { + "bbox": [ + 316, + 86, + 553, + 295 + ], + "type": "inline_equation", + "content": "\\{r^i\\}_{i = 1}^M" + }, + { + "bbox": [ + 316, + 86, + 553, + 295 + ], + "type": "text", + "content": " by Eqs. 2-3 \nObtain relative advantages " + }, + { + "bbox": [ + 316, + 86, + 553, + 295 + ], + "type": "inline_equation", + "content": "\\{\\hat{A}^i\\}_{i = 1}^M" + }, + { + "bbox": [ + 316, + 86, + 553, + 295 + ], + "type": "text", + "content": " by Eq. 4 \nOptimize policy model " + }, + { + "bbox": [ + 316, + 86, + 553, + 295 + ], + "type": "inline_equation", + "content": "\\pi_{\\theta}" + }, + { + "bbox": [ + 316, + 86, + 553, + 295 + ], + "type": "text", + "content": " by Eqs. 5-6 \nend for \nreturn policy model " + }, + { + "bbox": [ + 316, + 86, + 553, + 295 + ], + "type": "inline_equation", + "content": "\\pi_{\\theta}" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "code_body" + } + ], + "index": 12, + "sub_type": "algorithm" + }, + { + "bbox": [ + 313, + 315, + 555, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 315, + 555, + 388 + ], + "spans": [ + { + "bbox": [ + 313, + 315, + 555, + 388 + ], + "type": "text", + "content": "where the KL divergence is adopted to regularize the policy model, preventing excessive deviation from the reference model. The reference model is typically initialized as the same model as the policy model but remains frozen during RL training. The KL divergence between the policy model and the reference model is estimated as in [34]:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 319, + 395, + 555, + 422 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 319, + 395, + 555, + 422 + ], + "spans": [ + { + "bbox": [ + 319, + 395, + 555, + 422 + ], + "type": "interline_equation", + "content": "D_{KL}\\left(\\pi_{\\theta} \\,\\|\\, \\pi_{ref}\\right) = \\frac{\\pi_{ref}\\left(\\mathbf{c}^{i} \\mid Q\\right)}{\\pi_{\\theta}\\left(\\mathbf{c}^{i} \\mid Q\\right)} - \\log \\frac{\\pi_{ref}\\left(\\mathbf{c}^{i} \\mid Q\\right)}{\\pi_{\\theta}\\left(\\mathbf{c}^{i} \\mid Q\\right)} - 1. \\tag {6}", + "image_path": "cc8691112c299eff8cc7beb85c16c3122f1c04b330a8141f6be8b51d0884c159.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 314, + 429, + 391, + 442 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 429, + 391, + 442 + ], + "spans": [ + { + "bbox": [ + 314, + 429, + 391, + 442 + ], + "type": "text", + "content": "4. 
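The group-relative update of Eqs. 4-6 can be sketched as follows (a simplified, single-question PyTorch illustration under assumed sequence-level log-probabilities; `beta` is a placeholder value and the real implementation operates token-wise):

```python
import torch

def stepgrpo_loss(logp: torch.Tensor, logp_ref: torch.Tensor,
                  rewards: torch.Tensor, beta: float = 0.04) -> torch.Tensor:
    """logp/logp_ref: (M,) sequence log-probs of M sampled paths under the
    policy and the frozen reference model; rewards: (M,) step-wise rewards r^i."""
    # Eq. 4: group-relative advantage (mean group reward as the baseline).
    adv = (rewards - rewards.mean()) / (rewards.std() + 1e-8)
    # Eq. 5: likelihood ratio against the detached (no-grad) copy of the policy.
    ratio = torch.exp(logp - logp.detach())
    # Eq. 6: estimator of D_KL(pi_theta || pi_ref) used in place of the exact KL.
    kl = torch.exp(logp_ref - logp) - (logp_ref - logp) - 1.0
    return -(ratio * adv - beta * kl).mean()

# Toy usage: M = 4 sampled reasoning paths for one question.
logp = torch.randn(4, requires_grad=True)
loss = stepgrpo_loss(logp, torch.randn(4), torch.tensor([2.1, 0.1, 1.1, 0.1]))
loss.backward()
```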
Experiment" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 449, + 555, + 497 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 449, + 555, + 497 + ], + "spans": [ + { + "bbox": [ + 313, + 449, + 555, + 497 + ], + "type": "text", + "content": "This section presents experiments including datasets and implementation details, main experimental results, ablation studies and discussion, respectively. More details are to be described in the ensuing subsections." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 504, + 376, + 515 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 504, + 376, + 515 + ], + "spans": [ + { + "bbox": [ + 313, + 504, + 376, + 515 + ], + "type": "text", + "content": "4.1. Datasets" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 313, + 521, + 555, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 521, + 555, + 665 + ], + "spans": [ + { + "bbox": [ + 313, + 521, + 555, + 665 + ], + "type": "text", + "content": "For policy warm-up, we adopt Mulberry-260k [46] for supervised fine-tuning. For step-wise online policy optimization, we randomly sample 10K data from Mulberry-260k as our training data. For evaluation, we adopt 8 widely-used multimodal benchmarks for comprehensively evaluating our proposed StepGRPO, including MathVista [23], MMStar [6], Math-Vision [40], ChartQA [26], DynaMath [57], HallusionBench [12], MathVerse [54], MME [11] and MM-Reason [45]. These multimodal benchmarks cover a wide range of tasks from mathematical reasoning, chart understanding, visual hallucination and general visual understanding." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 313, + 672, + 447, + 684 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 672, + 447, + 684 + ], + "spans": [ + { + "bbox": [ + 313, + 672, + 447, + 684 + ], + "type": "text", + "content": "4.2. Implementation Details" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 313, + 689, + 555, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 689, + 555, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 689, + 555, + 713 + ], + "type": "text", + "content": "Our proposed StepGRPO is generally applicable to different MLLMs. In our experiments, we adopt two state-of-the-art" + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 59, + 70, + 558, + 385 + ], + "blocks": [ + { + "bbox": [ + 59, + 70, + 558, + 385 + ], + "lines": [ + { + "bbox": [ + 59, + 70, + 558, + 385 + ], + "spans": [ + { + "bbox": [ + 59, + 70, + 558, + 385 + ], + "type": "table", + "html": "
<table><tr><td>Method</td><td>MathVista</td><td>MMStar</td><td>Math-V</td><td>ChartQA</td><td>DynaMath</td><td>HallBench</td><td>MathVerse</td><td>MME-sum</td><td>MMReason</td><td>AVG</td></tr>
<tr><td colspan='11'>Closed-Source Model</td></tr>
<tr><td>GPT-4o [15]</td><td>63.8</td><td>63.9</td><td>30.3</td><td>85.7</td><td>63.7</td><td>55.0</td><td>39.4</td><td>2329</td><td>21.1</td><td>56.2</td></tr>
<tr><td>Claude-3.5 Sonnet [1]</td><td>67.7</td><td>62.2</td><td>-</td><td>90.8</td><td>64.8</td><td>55.0</td><td>-</td><td>1920</td><td>-</td><td>-</td></tr>
<tr><td colspan='11'>Open-Source Model</td></tr>
<tr><td>Cambrian-1-8B [38]</td><td>49.0</td><td>-</td><td>-</td><td>73.3</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr>
<tr><td>MM-1.5-7B [51]</td><td>47.6</td><td>-</td><td>-</td><td>78.6</td><td>-</td><td>-</td><td>-</td><td>1861</td><td>-</td><td>-</td></tr>
<tr><td>Idefics3-LLaMA3-8B [18]</td><td>58.4</td><td>55.9</td><td>-</td><td>74.8</td><td>-</td><td>-</td><td>-</td><td>1937</td><td>-</td><td>-</td></tr>
<tr><td>InternVL2-8B [8]</td><td>58.3</td><td>61.5</td><td>-</td><td>83.3</td><td>39.7</td><td>-</td><td>-</td><td>2210</td><td>-</td><td>-</td></tr>
<tr><td>MiniCPM-V-2.6-8B [48]</td><td>60.6</td><td>57.5</td><td>-</td><td>-</td><td>-</td><td>48.1</td><td>-</td><td>2348</td><td>-</td><td>-</td></tr>
<tr><td>DeepSeek-VL2-MOE-4.5B [43]</td><td>62.8</td><td>61.3</td><td>-</td><td>86.0</td><td>-</td><td>-</td><td>-</td><td>2253</td><td>11.5</td><td>-</td></tr>
<tr><td colspan='11'>Reasoning Model</td></tr>
<tr><td>LLaVA-CoT-11B [44]</td><td>54.8</td><td>57.6</td><td>-</td><td>-</td><td>-</td><td>47.8</td><td>-</td><td>-</td><td>-</td><td>-</td></tr>
<tr><td>LLaVA-Reasoner-8B [55]</td><td>50.6</td><td>54.0</td><td>-</td><td>83.0</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr>
<tr><td>Insight-V-8B [10]</td><td>49.8</td><td>57.4</td><td>-</td><td>77.4</td><td>-</td><td>-</td><td>-</td><td>2069</td><td>-</td><td>-</td></tr>
<tr><td>Mulberry-7B [46]</td><td>63.1</td><td>61.3</td><td>-</td><td>83.9</td><td>45.1</td><td>54.1</td><td>-</td><td>2396</td><td>11.8</td><td>-</td></tr>
<tr><td>LlamaV-o1-11B [37]</td><td>54.4</td><td>59.4</td><td>-</td><td>-</td><td>-</td><td>63.5</td><td>-</td><td>-</td><td>-</td><td>-</td></tr>
<tr><td>Vision-R1-7B [14]</td><td>73.5</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>52.4</td><td>-</td><td>-</td><td>-</td></tr>
<tr><td>LMM-R1 [30]</td><td>63.2</td><td>58.0</td><td>26.3</td><td>-</td><td>-</td><td>-</td><td>41.5</td><td>-</td><td>-</td><td>-</td></tr>
<tr><td>R1-ShareVL-7B [47]</td><td>75.4</td><td>67.0</td><td>29.5</td><td>-</td><td>-</td><td>-</td><td>52.8</td><td>-</td><td>-</td><td>-</td></tr>
<tr><td>Qwen2-VL-2B [41]</td><td>43.0</td><td>48.0</td><td>12.4</td><td>73.5</td><td>24.9</td><td>41.7</td><td>19.7</td><td>1872</td><td>7.7</td><td>37.5</td></tr>
<tr><td>R1-VL-2B (Ours)</td><td>52.1</td><td>49.8</td><td>17.1</td><td>75.2</td><td>29.4</td><td>44.0</td><td>26.2</td><td>2048</td><td>8.3</td><td>41.6</td></tr>
<tr><td>Qwen2-VL-7B [41]</td><td>58.2</td><td>60.7</td><td>16.3</td><td>83.0</td><td>42.1</td><td>50.6</td><td>32.5</td><td>2327</td><td>11.9</td><td>48.7</td></tr>
<tr><td>R1-VL-7B (Ours)</td><td>63.5</td><td>60.0</td><td>24.7</td><td>83.9</td><td>45.2</td><td>54.7</td><td>40.0</td><td>2376</td><td>12.5</td><td>52.1</td></tr>
<tr><td>Qwen2.5-VL-7B [2]</td><td>68.2</td><td>63.9</td><td>25.1</td><td>87.3</td><td>53.2</td><td>52.1</td><td>49.2</td><td>2347</td><td>17.3</td><td>55.5</td></tr>
<tr><td>R1-VL-7B* (Ours)</td><td>74.3</td><td>66.2</td><td>28.2</td><td>87.7</td><td>56.5</td><td>57.2</td><td>52.2</td><td>2395</td><td>17.9</td><td>58.4</td></tr></table>
", + "image_path": "08a395e3dfac9af5c8b9b4bcea8c772a6dd987bb1bf0054d2e9907a99f461acd.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 62, + 446, + 293, + 540 + ], + "blocks": [ + { + "bbox": [ + 55, + 394, + 555, + 429 + ], + "lines": [ + { + "bbox": [ + 55, + 394, + 555, + 429 + ], + "spans": [ + { + "bbox": [ + 55, + 394, + 555, + 429 + ], + "type": "text", + "content": "Table 1. Main experimental results. To comprehensively examine the proposed StepGRPO, we conduct extensive experiments with two baseline models on eight benchmarks, and compare StepGRPO with various state-of-the-art MLLMs.* indicates that the model is trained using Qwen2.5-VL-7B as the base model with the data from [47]." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 62, + 446, + 293, + 540 + ], + "lines": [ + { + "bbox": [ + 62, + 446, + 293, + 540 + ], + "spans": [ + { + "bbox": [ + 62, + 446, + 293, + 540 + ], + "type": "table", + "html": "
<table><tr><td rowspan='2'>Warm-up</td><td colspan='2'>Step-wise reasoning rewards</td><td rowspan='2'>MathVista</td></tr>
<tr><td>StepRAR</td><td>StepRVR</td></tr>
<tr><td></td><td></td><td></td><td>58.2</td></tr>
<tr><td>✓</td><td></td><td></td><td>61.2</td></tr>
<tr><td>✓</td><td>✓</td><td></td><td>62.4</td></tr>
<tr><td>✓</td><td></td><td>✓</td><td>61.9</td></tr>
<tr><td>✓</td><td>✓</td><td>✓</td><td>63.5</td></tr></table>
", + "image_path": "5606d2223621250bdaa6c74f2d34a58e7f44e544758016ac5a16d0b60f1acd4f.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 548, + 283, + 559 + ], + "lines": [ + { + "bbox": [ + 67, + 548, + 283, + 559 + ], + "spans": [ + { + "bbox": [ + 67, + 548, + 283, + 559 + ], + "type": "text", + "content": "Table 2. Ablation study of StepGRPO over Qwen2-VL-7B." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 55, + 581, + 295, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 581, + 295, + 641 + ], + "spans": [ + { + "bbox": [ + 55, + 581, + 295, + 641 + ], + "type": "text", + "content": "open-source MLLMs, i.e., Qwen2-VL-2B and Qwen2-VL-7B [41]. For the policy warm-up phase, we set the training batch size to 128. Following prior work [46], we use a learning rate of " + }, + { + "bbox": [ + 55, + 581, + 295, + 641 + ], + "type": "inline_equation", + "content": "1\\mathrm{e}^{-5}" + }, + { + "bbox": [ + 55, + 581, + 295, + 641 + ], + "type": "text", + "content": " for Qwen2-VL-2B and " + }, + { + "bbox": [ + 55, + 581, + 295, + 641 + ], + "type": "inline_equation", + "content": "5\\mathrm{e}^{-6}" + }, + { + "bbox": [ + 55, + 581, + 295, + 641 + ], + "type": "text", + "content": " for Qwen2-VL-7B, respectively." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 642, + 296, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 642, + 296, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 642, + 296, + 714 + ], + "type": "text", + "content": "For the step-wise online policy optimization phase, we perform 4 rollouts per question " + }, + { + "bbox": [ + 55, + 642, + 296, + 714 + ], + "type": "inline_equation", + "content": "(M = 4)" + }, + { + "bbox": [ + 55, + 642, + 296, + 714 + ], + "type": "text", + "content": " and set the sampling temperature to 1.2 to encourage diverse reasoning paths. The maximum sequence length is set to " + }, + { + "bbox": [ + 55, + 642, + 296, + 714 + ], + "type": "inline_equation", + "content": "L = 1024" + }, + { + "bbox": [ + 55, + 642, + 296, + 714 + ], + "type": "text", + "content": ", ensuring that the model can generate complete reasoning paths. Both the policy model and reference model are ini" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 313, + 449, + 555, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 449, + 555, + 533 + ], + "spans": [ + { + "bbox": [ + 313, + 449, + 555, + 533 + ], + "type": "text", + "content": "tialized from the model after the warm-up, with the reference model frozen during RL training. The policy model's learning rate is " + }, + { + "bbox": [ + 313, + 449, + 555, + 533 + ], + "type": "inline_equation", + "content": "1\\mathrm{e}^{-6}" + }, + { + "bbox": [ + 313, + 449, + 555, + 533 + ], + "type": "text", + "content": ", and we set the batch size to 4. We set the coefficient of match score " + }, + { + "bbox": [ + 313, + 449, + 555, + 533 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 313, + 449, + 555, + 533 + ], + "type": "text", + "content": " to 0.1 to balance its effect. Following [39], the KL divergence coefficient " + }, + { + "bbox": [ + 313, + 449, + 555, + 533 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 313, + 449, + 555, + 533 + ], + "type": "text", + "content": " in Eq. 5 is set to 0.04 by default. 
All experiments are conducted on 4 H100-80GB GPUs." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ 313, 540, 466, 553 ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ 313, 540, 466, 553 ], + "spans": [ + { + "bbox": [ 313, 540, 466, 553 ], + "type": "text", + "content": "4.3. Main Experimental Results" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ 313, 558, 555, 593 ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ 313, 558, 555, 593 ], + "spans": [ + { + "bbox": [ 313, 558, 555, 593 ], + "type": "text", + "content": "We conduct a comprehensive evaluation of R1-VL across nine widely used benchmarks, comparing it with various state-of-the-art MLLMs, as shown in Table 1." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ 312, 594, 556, 715 ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ 312, 594, 556, 715 ], + "spans": [ + { + "bbox": [ 312, 594, 556, 715 ], + "type": "text", + "content": "We first compare R1-VL with its baseline models, Qwen2-VL-2B and Qwen2-VL-7B. The baseline models exhibit limited reasoning capability, so very few of their reasoning paths receive rewards, which in turn hinders reinforcement learning. In contrast, R1-VL with our proposed StepGRPO consistently improves the baseline models by significant margins, achieving " + }, + { + "bbox": [ 312, 594, 556, 715 ], + "type": "inline_equation", + "content": "4.6\\%" + }, + { + "bbox": [ 312, 594, 556, 715 ], + "type": "text", + "content": " improvement over Qwen2-VL-2B and " + }, + { + "bbox": [ 312, 594, 556, 715 ], + "type": "inline_equation", + "content": "3.8\\%" + }, + { + "bbox": [ 312, 594, 556, 715 ], + "type": "text", + "content": " over Qwen2-VL-7B. This improvement is largely attributed to the fact that StepGRPO introduces step-wise reasoning accuracy and validity rewards," + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [], + "page_size": [ 612, 792 ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ 58, 70, 294, 122 ], + "blocks": [ + { + "bbox": [ 58, 70, 294, 122 ], + "lines": [ + { + "bbox": [ 58, 70, 294, 122 ], + "spans": [ + { + "bbox": [ 58, 70, 294, 122 ], + "type": "table", + "html": "
<table><tr><td></td><td colspan='5'>Number of generations M per question</td></tr>
<tr><td>Method</td><td>2</td><td>3</td><td>4</td><td>5</td><td>6</td></tr>
<tr><td>R1-VL-7B</td><td>62.5</td><td>62.8</td><td>63.5</td><td>63.2</td><td>63.7</td></tr></table>
", + "image_path": "30454a921be72743b7012a32c0b65488666b790c3263f8df6ab07a0349fea014.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 174, + 295, + 209 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 174, + 295, + 209 + ], + "spans": [ + { + "bbox": [ + 55, + 174, + 295, + 209 + ], + "type": "text", + "content": "which provide rich and informative supervision at each reasoning step, effectively mitigating the sparse reward issue for MLLMs." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 54, + 210, + 295, + 377 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 210, + 295, + 377 + ], + "spans": [ + { + "bbox": [ + 54, + 210, + 295, + 377 + ], + "type": "text", + "content": "In addition, we compare R1-VL with existing state-of-the-art reasoning MLLMs. As shown in Table 1, R1-VL achieves better performance on most benchmarks, particularly in mathematical reasoning tasks. For example, R1-VL-7B surpasses Mulberry-7B and LlamaV-o1-11B by " + }, + { + "bbox": [ + 54, + 210, + 295, + 377 + ], + "type": "inline_equation", + "content": "0.6\\%" + }, + { + "bbox": [ + 54, + 210, + 295, + 377 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 54, + 210, + 295, + 377 + ], + "type": "inline_equation", + "content": "9.3\\%" + }, + { + "bbox": [ + 54, + 210, + 295, + 377 + ], + "type": "text", + "content": " respectively on the reasoning-intensive benchmark MathVista. Notably, R1-VL-2B even outperforms larger MLLMs. For instance, R1-VL-2B largely outperforms LLaVA-Reasoner-8B and LLaVA-CoT-11B by " + }, + { + "bbox": [ + 54, + 210, + 295, + 377 + ], + "type": "inline_equation", + "content": "13.1\\%" + }, + { + "bbox": [ + 54, + 210, + 295, + 377 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 54, + 210, + 295, + 377 + ], + "type": "inline_equation", + "content": "9.3\\%" + }, + { + "bbox": [ + 54, + 210, + 295, + 377 + ], + "type": "text", + "content": " on MathVista, respectively. This superior performance demonstrates that StepGRPO effectively enhances MLLMs' reasoning abilities by encouraging self-improvement via step-wise online reinforcement learning, rather than merely imitating positive reasoning paths." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 54, + 378, + 295, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 378, + 295, + 498 + ], + "spans": [ + { + "bbox": [ + 54, + 378, + 295, + 498 + ], + "type": "text", + "content": "Additionally, we benchmark R1-VL against general MLLMs, including closed-source models such as GPT-4o and Claude-3.5 Sonnet, as well as open-source models like Cambrain-1-8B and DeepSeek-VL2-MOE-4.5B. We observe that R1-VL outperforms most open-source MLLMs and achieves competitive results against closed-source models. For example, R1-VL-7B achieves 63.7 accuracy on MathVista, closely matching GPT-4o's accuracy of 63.8. These results further validate StepGRPO's effectiveness in enhancing the reasoning capabilities of MLLMs." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 505, + 149, + 517 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 505, + 149, + 517 + ], + "spans": [ + { + "bbox": [ + 55, + 505, + 149, + 517 + ], + "type": "text", + "content": "4.4. 
Ablation Study" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 54, + 521, + 295, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 521, + 295, + 714 + ], + "spans": [ + { + "bbox": [ + 54, + 521, + 295, + 714 + ], + "type": "text", + "content": "We conduct ablation studies for StepGRPO on Qwen2-VL-7B over MathVista benchmark for examining the effect of step-wise reasoning rewards including step-wise reasoning accuracy reward (StepRAR) and step-wise reasoning validity reward (StepRVR), as well as the role of the warm-up phase. As shown in Table 2, involving a warm-up stage improves baseline model to " + }, + { + "bbox": [ + 54, + 521, + 295, + 714 + ], + "type": "inline_equation", + "content": "61.2\\%" + }, + { + "bbox": [ + 54, + 521, + 295, + 714 + ], + "type": "text", + "content": ", allowing the model to learn basic reasoning knowledge before reinforcement learning. In addition, including either StepRAR or StepRVR into the online reinforcement learning process outperforms the model with warm-up by large margins, demonstrating that both two types of step-wise rewards contribute to enhancing step-by-step reasoning capabilities. The best performance (i.e., " + }, + { + "bbox": [ + 54, + 521, + 295, + 714 + ], + "type": "inline_equation", + "content": "63.7\\%" + }, + { + "bbox": [ + 54, + 521, + 295, + 714 + ], + "type": "text", + "content": ") is achieved when both StepRAR and StepRVR are applied together. This shows that StepGRPO effectively improves complex" + } + ] + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 317, + 70, + 553, + 129 + ], + "blocks": [ + { + "bbox": [ + 55, + 131, + 295, + 153 + ], + "lines": [ + { + "bbox": [ + 55, + 131, + 295, + 153 + ], + "spans": [ + { + "bbox": [ + 55, + 131, + 295, + 153 + ], + "type": "text", + "content": "Table 3. Parameter analysis of " + }, + { + "bbox": [ + 55, + 131, + 295, + 153 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 55, + 131, + 295, + 153 + ], + "type": "text", + "content": ". The experiments are conducted on Qwen2-VL-7B over MathVista." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 317, + 70, + 553, + 129 + ], + "lines": [ + { + "bbox": [ + 317, + 70, + 553, + 129 + ], + "spans": [ + { + "bbox": [ + 317, + 70, + 553, + 129 + ], + "type": "table", + "html": "
<table><tr><td>Method</td><td>MathVista</td></tr>
<tr><td>Warm-up</td><td>61.7</td></tr>
<tr><td>Warm-up + Outcome-level reward</td><td>62.3</td></tr>
<tr><td>Warm-up + Step-wise reward (Ours)</td><td>63.5</td></tr></table>
", + "image_path": "c4dcc9464dd8f5bae7a084695aadece73c6c64879dbae0c0fa3ed1632dd6f628.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 138, + 555, + 160 + ], + "lines": [ + { + "bbox": [ + 313, + 138, + 555, + 160 + ], + "spans": [ + { + "bbox": [ + 313, + 138, + 555, + 160 + ], + "type": "text", + "content": "Table 4. Effectiveness of the step-wise reasoning rewards. The experiments are conducted on Qwen2-VL-7B over MathVista." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "text" + }, + { + "type": "image", + "bbox": [ + 329, + 174, + 525, + 311 + ], + "blocks": [ + { + "bbox": [ + 329, + 174, + 525, + 311 + ], + "lines": [ + { + "bbox": [ + 329, + 174, + 525, + 311 + ], + "spans": [ + { + "bbox": [ + 329, + 174, + 525, + 311 + ], + "type": "image", + "image_path": "342463935f3c59dbe09ff74e62294e3541ac31f9b337d0a50fa17549e79c2968.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 322, + 555, + 344 + ], + "lines": [ + { + "bbox": [ + 313, + 322, + 555, + 344 + ], + "spans": [ + { + "bbox": [ + 313, + 322, + 555, + 344 + ], + "type": "text", + "content": "Figure 3. Comparison between StepGRPO and SFT. The experiments are conducted on Qwen2-VL-7B over MathVista." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 365, + 554, + 401 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 365, + 554, + 401 + ], + "spans": [ + { + "bbox": [ + 313, + 365, + 554, + 401 + ], + "type": "text", + "content": "reasoning tasks by reinforcing both the correctness of intermediate steps and the overall logical structure of the reasoning process." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 409, + 387, + 421 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 409, + 387, + 421 + ], + "spans": [ + { + "bbox": [ + 313, + 409, + 387, + 421 + ], + "type": "text", + "content": "4.5. Discussion" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 426, + 555, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 426, + 555, + 605 + ], + "spans": [ + { + "bbox": [ + 313, + 426, + 555, + 605 + ], + "type": "text", + "content": "Parameter analysis. We conduct the parameter analysis on the number of generations " + }, + { + "bbox": [ + 313, + 426, + 555, + 605 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 313, + 426, + 555, + 605 + ], + "type": "text", + "content": " in a group with Qwen2-VL7B over benchmark MathVista, analyzing its impact on reasoning performance. As described in Section 3, " + }, + { + "bbox": [ + 313, + 426, + 555, + 605 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 313, + 426, + 555, + 605 + ], + "type": "text", + "content": " controls the number of generated reasoning trajectories per question during the RL phase. Table 3 shows that a larger " + }, + { + "bbox": [ + 313, + 426, + 555, + 605 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 313, + 426, + 555, + 605 + ], + "type": "text", + "content": " generally leads to better performance. This is because, in group relative optimization, the baseline reward is estimated as the average reward of all generated reasoning paths. 
A larger " + }, + { + "bbox": [ + 313, + 426, + 555, + 605 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 313, + 426, + 555, + 605 + ], + "type": "text", + "content": " results in a more stable and accurate baseline estimation, whereas a small " + }, + { + "bbox": [ + 313, + 426, + 555, + 605 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 313, + 426, + 555, + 605 + ], + "type": "text", + "content": " may lead to high variance in baseline estimation, making the optimization process less reliable. However, increasing " + }, + { + "bbox": [ + 313, + 426, + 555, + 605 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 313, + 426, + 555, + 605 + ], + "type": "text", + "content": " also introduces higher computational costs. Therefore, we set " + }, + { + "bbox": [ + 313, + 426, + 555, + 605 + ], + "type": "inline_equation", + "content": "M = 4" + }, + { + "bbox": [ + 313, + 426, + 555, + 605 + ], + "type": "text", + "content": " as the default to balance performance and computational efficiency." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 606, + 556, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 606, + 556, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 606, + 556, + 715 + ], + "type": "text", + "content": "Effectiveness of the step-wise reward. Our proposed stepwise reward mechanism plays a crucial role in mitigating the sparse reward issue by providing fine-grained supervision at each reasoning step. To further validate its effectiveness, we conduct an experiment comparing outcome-level reward against our step-wise reward. Specifically, we evaluate three settings: (1) Warm-up only; (2) Warm-up + Outcome-level Reward, where the model is optimized with outcome-level reward; and (3) Warm-up + Step-wise Re" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 59, + 123, + 159, + 194 + ], + "blocks": [ + { + "bbox": [ + 59, + 123, + 159, + 194 + ], + "lines": [ + { + "bbox": [ + 59, + 123, + 159, + 194 + ], + "spans": [ + { + "bbox": [ + 59, + 123, + 159, + 194 + ], + "type": "image", + "image_path": "e5f37c322318e22f5c792d8f69d382aba0a87edb79ed67e1acbc2363b6ff942e.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 63, + 206, + 154, + 231 + ], + "lines": [ + { + "bbox": [ + 63, + 206, + 154, + 231 + ], + "spans": [ + { + "bbox": [ + 63, + 206, + 154, + 231 + ], + "type": "text", + "content": "Question: Subtract all red things. \nSubtract all tiny matte balls. How many objects are left?" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 63, + 238, + 113, + 246 + ], + "lines": [ + { + "bbox": [ + 63, + 238, + 113, + 246 + ], + "spans": [ + { + "bbox": [ + 63, + 238, + 113, + 246 + ], + "type": "text", + "content": "Correct answer: 5" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 242, + 304, + 367, + 316 + ], + "lines": [ + { + "bbox": [ + 242, + 304, + 367, + 316 + ], + "spans": [ + { + "bbox": [ + 242, + 304, + 367, + 316 + ], + "type": "text", + "content": "Figure 4. Qualitative comparison." 
+ } + ] + } + ], + "index": 30, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 171, + 74, + 214, + 82 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 171, + 74, + 214, + 82 + ], + "spans": [ + { + "bbox": [ + 171, + 74, + 214, + 82 + ], + "type": "text", + "content": "Qwen2-VL-7B:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 171, + 84, + 347, + 101 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 171, + 84, + 347, + 101 + ], + "spans": [ + { + "bbox": [ + 171, + 84, + 347, + 101 + ], + "type": "text", + "content": "First, identify the red objects in the image. There is 1 red object, which is a small red sphere. Incorrect reasoning" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 171, + 101, + 350, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 171, + 101, + 350, + 140 + ], + "spans": [ + { + "bbox": [ + 171, + 101, + 350, + 140 + ], + "type": "text", + "content": "Then, subtract the red object from the total number of objects. There are 7 objects in total and subtracting the 1 red object leaves 6 objects. Identify the tiny matte balls in the image. There are 2 tiny matte balls, which are the small green cylinder and the cyan sphere. Subtract the tiny matte balls from the remaining objects." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 171, + 140, + 337, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 171, + 140, + 337, + 156 + ], + "spans": [ + { + "bbox": [ + 171, + 140, + 337, + 156 + ], + "type": "text", + "content": "After subtracting the red object, there are 6 objects left. Subtracting the 2 tiny matte balls leaves 4 objects. Answer: 4" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 171, + 163, + 209, + 171 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 171, + 163, + 209, + 171 + ], + "spans": [ + { + "bbox": [ + 171, + 163, + 209, + 171 + ], + "type": "text", + "content": "Mulberry-7B:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 170, + 174, + 356, + 201 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 170, + 174, + 356, + 201 + ], + "spans": [ + { + "bbox": [ + 170, + 174, + 356, + 201 + ], + "type": "text", + "content": "Image Description: The image shows several 3D objects on a flat surface. There are two red objects: a small red sphere and a larger green cylinder. Other objects include a yellow cube, a green cylinder, a blue cylinder, a purple cylinder, and a teal sphere." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 170, + 202, + 353, + 223 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 170, + 202, + 353, + 223 + ], + "spans": [ + { + "bbox": [ + 170, + 202, + 353, + 223 + ], + "type": "text", + "content": "Rationales: The question asks to subtract all red things and all tiny matte balls. We need to identify these objects in the image and count the remaining ones." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 171, + 224, + 246, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 171, + 224, + 246, + 232 + ], + "spans": [ + { + "bbox": [ + 171, + 224, + 246, + 232 + ], + "type": "text", + "content": "Let's think step by step." 
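To give a feel for how the step-wise reasoning accuracy reward (StepRAR) could score paths like the ones in Figure 4, here is a hypothetical sketch. It assumes a containment-style soft match of pre-mined key steps against the generated path and uses the match-score coefficient α = 0.1 reported in Sec. 4.2; the function name, normalization, and matching rule are illustrative stand-ins for the paper's Eqs. 2-3, not its exact formulation.

```python
import re

def step_rar(path: str, key_steps: list[str], alpha: float = 0.1) -> float:
    """Illustrative soft key-step matching reward (hypothetical).

    Rewards a reasoning path in proportion to how many pre-mined key
    steps it (softly) contains; alpha = 0.1 balances the match score.
    """
    def norm(s: str) -> str:
        # Collapse whitespace and case so trivial rephrasings still match.
        return re.sub(r"\s+", " ", s.lower()).strip()

    p = norm(path)
    matched = sum(norm(step) in p for step in key_steps)
    return alpha * matched / max(len(key_steps), 1)
```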
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 171, + 233, + 354, + 264 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 171, + 233, + 354, + 264 + ], + "spans": [ + { + "bbox": [ + 171, + 233, + 354, + 264 + ], + "type": "text", + "content": "Step 1: Identify and count all red objects. There is one red sphere. Incorrect reasoning Step 2: Identify and count all tiny matte balls. There is one teal sphere, which is not red, so it is not subtracted." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 171, + 264, + 350, + 281 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 171, + 264, + 350, + 281 + ], + "spans": [ + { + "bbox": [ + 171, + 264, + 350, + 281 + ], + "type": "text", + "content": "Step 3: Subtract the red objects and tiny matte balls from the total number of objects: " + }, + { + "bbox": [ + 171, + 264, + 350, + 281 + ], + "type": "inline_equation", + "content": "7 - 1 = 6" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 171, + 282, + 241, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 171, + 282, + 241, + 289 + ], + "spans": [ + { + "bbox": [ + 171, + 282, + 241, + 289 + ], + "type": "text", + "content": "The final answer is: 6" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 369, + 76, + 416, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 369, + 76, + 416, + 84 + ], + "spans": [ + { + "bbox": [ + 369, + 76, + 416, + 84 + ], + "type": "text", + "content": "R1-VL-7B (Ours):" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 369, + 89, + 432, + 96 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 369, + 89, + 432, + 96 + ], + "spans": [ + { + "bbox": [ + 369, + 89, + 432, + 96 + ], + "type": "text", + "content": "Image Description:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 369, + 96, + 545, + 121 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 369, + 96, + 545, + 121 + ], + "spans": [ + { + "bbox": [ + 369, + 96, + 545, + 121 + ], + "type": "text", + "content": "The image shows a collection of 3D geometric objects on a flat surface. There are various colored shapes, including a red sphere, a yellow cube, a green cylinder, a blue cylinder, a purple cylinder, and a cyan sphere." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 369, + 127, + 411, + 134 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 369, + 127, + 411, + 134 + ], + "spans": [ + { + "bbox": [ + 369, + 127, + 411, + 134 + ], + "type": "text", + "content": "Rationales:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 369, + 134, + 544, + 159 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 369, + 134, + 544, + 159 + ], + "spans": [ + { + "bbox": [ + 369, + 134, + 544, + 159 + ], + "type": "text", + "content": "The question asks to subtract all red things and all tiny matte balls, then determine how many objects are left. This involves identifying and counting specific objects based on their color and size." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 369, + 165, + 444, + 173 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 369, + 165, + 444, + 173 + ], + "spans": [ + { + "bbox": [ + 369, + 165, + 444, + 173 + ], + "type": "text", + "content": "Let's think step by step." 
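Rollouts like the ones compared in Figure 4 are generated with the sampling settings from Sec. 4.2. The following is a minimal sketch of that generation step with Hugging Face transformers; the auto-class choice, placeholder image path, and simplified prompt handling are assumptions (Qwen2-VL normally goes through its chat template), while M = 4, temperature 1.2, and the 1024-token budget come from the paper.

```python
from PIL import Image
from transformers import AutoProcessor, AutoModelForVision2Seq

model_id = "Qwen/Qwen2-VL-7B-Instruct"  # one of the two base models used
processor = AutoProcessor.from_pretrained(model_id)
model = AutoModelForVision2Seq.from_pretrained(model_id, device_map="auto")

image = Image.open("example.jpg")  # placeholder input image
question = "Subtract all red things. Subtract all tiny matte balls. How many objects are left?"

inputs = processor(text=question, images=image, return_tensors="pt").to(model.device)
rollouts = model.generate(
    **inputs,
    do_sample=True,          # stochastic decoding for diverse paths
    temperature=1.2,         # Sec. 4.2: encourages diverse reasoning paths
    num_return_sequences=4,  # M = 4 rollouts per question
    max_new_tokens=1024,     # L = 1024 maximum sequence length
)
paths = processor.batch_decode(rollouts, skip_special_tokens=True)
```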
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 369, + 178, + 400, + 184 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 369, + 178, + 400, + 184 + ], + "spans": [ + { + "bbox": [ + 369, + 178, + 400, + 184 + ], + "type": "text", + "content": "Step 1:" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 369, + 184, + 542, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 369, + 184, + 542, + 205 + ], + "spans": [ + { + "bbox": [ + 369, + 184, + 542, + 205 + ], + "type": "text", + "content": "Identify and count all objects in the image. There are seven objects: a yellow cube, a green cylinder, a blue cylinder, a purple cylinder, a red sphere, and a cyan sphere." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 369, + 209, + 400, + 216 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 369, + 209, + 400, + 216 + ], + "spans": [ + { + "bbox": [ + 369, + 209, + 400, + 216 + ], + "type": "text", + "content": "Step 2:" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 369, + 216, + 495, + 223 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 369, + 216, + 495, + 223 + ], + "spans": [ + { + "bbox": [ + 369, + 216, + 495, + 223 + ], + "type": "text", + "content": "Subtract all red things. There is one red sphere." + } + ] + } + ], + "index": 23 + }, + { + "type": "image", + "bbox": [ + 500, + 214, + 509, + 223 + ], + "blocks": [ + { + "bbox": [ + 500, + 214, + 509, + 223 + ], + "lines": [ + { + "bbox": [ + 500, + 214, + 509, + 223 + ], + "spans": [ + { + "bbox": [ + 500, + 214, + 509, + 223 + ], + "type": "image", + "image_path": "dc48c9c847fef2992a2cdf8778ddf43114550130a75e34b67ae5bbaad9c55bea.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_body" + } + ], + "index": 24 + }, + { + "bbox": [ + 369, + 228, + 400, + 235 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 369, + 228, + 400, + 235 + ], + "spans": [ + { + "bbox": [ + 369, + 228, + 400, + 235 + ], + "type": "text", + "content": "Step 3:" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 369, + 235, + 542, + 249 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 369, + 235, + 542, + 249 + ], + "spans": [ + { + "bbox": [ + 369, + 235, + 542, + 249 + ], + "type": "text", + "content": "Subtract all tiny matte balls from the remaining objects. There is one cyan sphere, which is matte and small." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 369, + 253, + 400, + 260 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 369, + 253, + 400, + 260 + ], + "spans": [ + { + "bbox": [ + 369, + 253, + 400, + 260 + ], + "type": "text", + "content": "Step 4:" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 369, + 260, + 539, + 274 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 369, + 260, + 539, + 274 + ], + "spans": [ + { + "bbox": [ + 369, + 260, + 539, + 274 + ], + "type": "text", + "content": "Count the remaining objects after removing the red sphere and the cyan sphere." 
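The structured template visible in R1-VL's output above (image description, rationale, numbered steps, final answer) is what the step-wise reasoning validity reward (StepRVR) is described as enforcing. A hypothetical sketch of such a completeness-and-logic check follows; the section tags and the contiguous step-numbering test are assumptions standing in for the paper's actual evaluation method.

```python
import re

def step_rvr(path: str) -> float:
    """Illustrative structure/validity reward (hypothetical).

    Returns 1.0 only when the path is complete (description, rationale,
    final answer) and its steps are numbered 1, 2, ..., n without gaps.
    """
    required = ("Image Description:", "Rationales:", "The final answer is:")
    complete = all(tag in path for tag in required)

    steps = [int(n) for n in re.findall(r"Step (\d+):", path)]
    logical = bool(steps) and steps == list(range(1, len(steps) + 1))

    return 1.0 if complete and logical else 0.0
```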
+ } + ] + } + ], + "index": 28 + }, + { + "bbox": [ 369, 278, 450, 286 ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ 369, 278, 450, 286 ], + "spans": [ + { + "bbox": [ 369, 278, 450, 286 ], + "type": "text", + "content": "The final answer is: 5" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ 54, 336, 295, 432 ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ 54, 336, 295, 432 ], + "spans": [ + { + "bbox": [ 54, 336, 295, 432 ], + "type": "text", + "content": "ward, where the model is optimized with our proposed step-wise reasoning reward. As shown in Table 4, both the outcome-level reward and our step-wise reward improve the warm-up model, while our step-wise reward achieves better performance. This further demonstrates that step-wise rewards are more effective in enhancing MLLMs' reasoning capabilities, as they provide more fine-grained supervision and largely mitigate the sparse reward issue." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ 55, 434, 295, 638 ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ 55, 434, 295, 638 ], + "spans": [ + { + "bbox": [ 55, 434, 295, 638 ], + "type": "text", + "content": "Comparison to supervised fine-tuning (SFT). As discussed before, StepGRPO encourages the MLLM to self-improve its reasoning ability with step-wise reward signals rather than merely imitating successful reasoning paths. Here, we conduct experiments to further compare StepGRPO with SFT. Specifically, we start with the model after the warm-up and conduct the experiments with Qwen2-VL-7B over MathVista. As shown in Fig. 3, under the same number of training steps, StepGRPO consistently outperforms SFT, demonstrating the effectiveness of step-wise reinforcement learning. This is largely attributed to StepGRPO's ability to refine reasoning trajectories through self-exploration and reward-guided optimization, rather than solely relying on passive imitation of reasoning paths. By leveraging step-wise reasoning rewards, StepGRPO provides richer and more informative supervision, leading to better reasoning processes compared to SFT." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ 55, 642, 296, 714 ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ 55, 642, 296, 714 ], + "spans": [ + { + "bbox": [ 55, 642, 296, 714 ], + "type": "text", + "content": "Qualitative comparison. We provide a qualitative comparison of Qwen2-VL-7B, Mulberry-7B and our R1-VL-7B. As shown in Fig. 4, Qwen2-VL-7B generates relatively short responses, lacking a thorough reasoning process. While Mulberry-7B generates detailed reasoning paths, its intermediate steps contain errors, leading to incorrect final an" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ 313, 336, 553, 360 ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ 313, 336, 553, 360 ], + "spans": [ + { + "bbox": [ 313, 336, 553, 360 ], + "type": "text", + "content": "swers. In contrast, R1-VL-7B enables a more accurate step-by-step reasoning process." + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ 313, 360, 553, 384 ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ 313, 360, 553, 384 ], + "spans": [ + { + "bbox": [ 313, 360, 553, 384 ], + "type": "text", + "content": "We provide more discussions, experimental results and qualitative analysis in the appendix."
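The KL estimate in Eq. 6, which regularizes every policy update discussed in this section, is simple to implement. A minimal sketch follows, assuming per-path log-probabilities log π_θ(c^i|Q) and log π_ref(c^i|Q) are available; this is the ratio − log(ratio) − 1 estimator used in [34], combined with the default β = 0.04 from Sec. 4.2.

```python
import torch

def kl_k3(logp_theta: torch.Tensor, logp_ref: torch.Tensor) -> torch.Tensor:
    """Eq. 6: pi_ref/pi_theta - log(pi_ref/pi_theta) - 1, computed
    stably from log-probabilities of the sampled path c^i."""
    log_ratio = logp_ref - logp_theta  # log(pi_ref / pi_theta)
    return torch.exp(log_ratio) - log_ratio - 1.0

# In the Eq. 5 objective this term enters as "- beta * D_KL" with beta = 0.04:
# loss = -(surrogate_term - 0.04 * kl_k3(logp_theta, logp_ref)).mean()
```

Note that this estimator is always non-negative, which keeps the penalty well behaved even when the policy barely deviates from the reference model.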
+ } + ] + } + ], + "index": 35 + }, + { + "bbox": [ 313, 394, 388, 407 ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ 313, 394, 388, 407 ], + "spans": [ + { + "bbox": [ 313, 394, 388, 407 ], + "type": "text", + "content": "5. Conclusion" + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ 313, 415, 555, 640 ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ 313, 415, 555, 640 ], + "spans": [ + { + "bbox": [ 313, 415, 555, 640 ], + "type": "text", + "content": "This paper presents StepGRPO, a new online reinforcement learning framework that enables MLLMs to self-improve their reasoning ability via a simple, effective and dense step-wise reward mechanism. Specifically, StepGRPO introduces two rule-based reasoning reward mechanisms: the Step-wise Reasoning Accuracy Reward, which rewards intermediate reasoning steps based on a soft key-step matching technique, and the Step-wise Reasoning Validity Reward, which rewards the reasoning path's structure and logical consistency through a reasoning completeness and logic evaluation method. In this way, StepGRPO effectively mitigates the sparse reward issue for MLLMs without the need for process reward models and encourages a more structured and logically consistent reasoning process. With the proposed StepGRPO, we develop R1-VL, a series of MLLMs with superior reasoning capability. Extensive experiments over nine benchmarks demonstrate the superiority of the proposed StepGRPO compared with state-of-the-art MLLMs." + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ 313, 642, 554, 713 ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ 313, 642, 554, 713 ], + "spans": [ + { + "bbox": [ 313, 642, 554, 713 ], + "type": "text", + "content": "Acknowledgement. This research is supported by the RIE2025 Industry Alignment Fund - Industry Collaboration Projects (IAF-ICP) (Award I2301E0026), administered by A\\*STAR, as well as supported by Alibaba Group and NTU Singapore through Alibaba-NTU Global e-Sustainability CorpLab (ANGEL)." + } + ] + } + ], + "index": 38 + } + ], + "discarded_blocks": [], + "page_size": [ 612, 792 ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ 56, 72, 115, 83 ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ 56, 72, 115, 83 ], + "spans": [ + { + "bbox": [ 56, 72, 115, 83 ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ 57, 91, 295, 704 ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ 61, 91, 236, 101 ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ 61, 91, 236, 101 ], + "spans": [ + { + "bbox": [ 61, 91, 236, 101 ], + "type": "text", + "content": "[1] Anthropic. Claude 3.5 sonnet, 2024. 1, 2, 6" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ 62, 102, 295, 144 ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ 62, 102, 295, 144 ], + "spans": [ + { + "bbox": [ 62, 102, 295, 144 ], + "type": "text", + "content": "[2] Shuai Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Sibo Song, Kai Dang, Peng Wang, Shijie Wang, Jun Tang, et al. Qwen2.5-vl technical report. arXiv preprint arXiv:2502.13923, 2025. 
6" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 62, + 144, + 294, + 198 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 144, + 294, + 198 + ], + "spans": [ + { + "bbox": [ + 62, + 144, + 294, + 198 + ], + "type": "text", + "content": "[3] Yuntao Bai, Andy Jones, Kamal Ndousse, Amanda Askell, Anna Chen, Nova DasSarma, Dawn Drain, Stanislav Fort, Deep Ganguli, Tom Henighan, et al. Training a helpful and harmless assistant with reinforcement learning from human feedback. arXiv preprint arXiv:2204.05862, 2022. 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 62, + 198, + 294, + 252 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 198, + 294, + 252 + ], + "spans": [ + { + "bbox": [ + 62, + 198, + 294, + 252 + ], + "type": "text", + "content": "[4] Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. Language models are few-shot learners. Advances in neural information processing systems, 33:1877-1901, 2020. 3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 62, + 253, + 294, + 284 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 253, + 294, + 284 + ], + "spans": [ + { + "bbox": [ + 62, + 253, + 294, + 284 + ], + "type": "text", + "content": "[5] Guoxin Chen, Minpeng Liao, Chengxi Li, and Kai Fan. Step-level value preference optimization for mathematical reasoning. arXiv preprint arXiv:2406.10858, 2024. 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 62, + 285, + 294, + 336 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 285, + 294, + 336 + ], + "spans": [ + { + "bbox": [ + 62, + 285, + 294, + 336 + ], + "type": "text", + "content": "[6] Lin Chen, Jinsong Li, Xiaoyi Dong, Pan Zhang, Yuhang Zang, Zehui Chen, Haodong Duan, Jiaqi Wang, Yu Qiao, Dahua Lin, et al. Are we on the right way for evaluating large vision-language models? arXiv preprint arXiv:2403.20330, 2024.5" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 62, + 338, + 294, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 338, + 294, + 380 + ], + "spans": [ + { + "bbox": [ + 62, + 338, + 294, + 380 + ], + "type": "text", + "content": "[7] Zixiang Chen, Yihe Deng, Huizhuo Yuan, Kaixuan Ji, and Quanquan Gu. Self-play fine-tuning converts weak language models to strong language models. arXiv preprint arXiv:2401.01335, 2024. 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 62, + 380, + 294, + 434 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 380, + 294, + 434 + ], + "spans": [ + { + "bbox": [ + 62, + 380, + 294, + 434 + ], + "type": "text", + "content": "[8] Zhe Chen, Weiyun Wang, Hao Tian, Shenglong Ye, Zhangwei Gao, Erfei Cui, Wenwen Tong, Kongzhi Hu, Jiapeng Luo, Zheng Ma, et al. How far are we to gpt-4v? closing the gap to commercial multimodal models with open-source suites. arXiv preprint arXiv:2404.16821, 2024. 1, 2, 6" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 62, + 434, + 294, + 488 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 434, + 294, + 488 + ], + "spans": [ + { + "bbox": [ + 62, + 434, + 294, + 488 + ], + "type": "text", + "content": "[9] Zesen Cheng, Sicong Leng, Hang Zhang, Yifei Xin, Xin Li, Guanzheng Chen, Yongxin Zhu, Wenqi Zhang, Ziyang Luo, Deli Zhao, et al. Videollama 2: Advancing spatial-temporal modeling and audio understanding in video-llms. 
arXiv preprint arXiv:2406.07476, 2024. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 57, + 488, + 294, + 542 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 488, + 294, + 542 + ], + "spans": [ + { + "bbox": [ + 57, + 488, + 294, + 542 + ], + "type": "text", + "content": "[10] Yuhao Dong, Zuyan Liu, Hai-Long Sun, Jingkang Yang, Winston Hu, Yongming Rao, and Ziwei Liu. Insight-v: Exploring long-chain visual reasoning with multimodal large language models. arXiv preprint arXiv:2411.14432, 2024. 3, 6" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 57, + 542, + 294, + 596 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 542, + 294, + 596 + ], + "spans": [ + { + "bbox": [ + 57, + 542, + 294, + 596 + ], + "type": "text", + "content": "[11] Chaoyou Fu, Peixian Chen, Yunhang Shen, Yulei Qin, Mengdan Zhang, Xu Lin, Jinrui Yang, Xiawu Zheng, Ke Li, Xing Sun, et al. Mme: A comprehensive evaluation benchmark for multimodal large language models. arXiv preprint arXiv:2306.13394, 2023. 5" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 57, + 596, + 294, + 660 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 596, + 294, + 660 + ], + "spans": [ + { + "bbox": [ + 57, + 596, + 294, + 660 + ], + "type": "text", + "content": "[12] Tianrui Guan, Fuxiao Liu, Xiyang Wu, Ruiqi Xian, Zongxia Li, Xiaoyu Liu, Xijun Wang, Lichang Chen, Furong Huang, Yaser Yacoob, et al. Hallusionbench: An advanced diagnostic suite for entangled language hallucination and visual illusion in large vision-language models. arXiv preprint arXiv:2310.14566, 2023. 5" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 57, + 661, + 294, + 704 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 661, + 294, + 704 + ], + "spans": [ + { + "bbox": [ + 57, + 661, + 294, + 704 + ], + "type": "text", + "content": "[13] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 73, + 553, + 706 + ], + "type": "list", + "angle": 0, + "index": 30, + "blocks": [ + { + "bbox": [ + 335, + 73, + 444, + 83 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 335, + 73, + 444, + 83 + ], + "spans": [ + { + "bbox": [ + 335, + 73, + 444, + 83 + ], + "type": "text", + "content": "arXiv:2501.12948,2025.1,3" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 316, + 83, + 553, + 137 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 83, + 553, + 137 + ], + "spans": [ + { + "bbox": [ + 316, + 83, + 553, + 137 + ], + "type": "text", + "content": "[14] Wenxuan Huang, Bohan Jia, Zijie Zhai, Shaosheng Cao, Zheyu Ye, Fei Zhao, Zhe Xu, Yao Hu, and Shaohui Lin. Vision-r1: Incentivizing reasoning capability in multimodal large language models. arXiv preprint arXiv:2503.06749, 2025. 3, 6" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 316, + 138, + 553, + 180 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 138, + 553, + 180 + ], + "spans": [ + { + "bbox": [ + 316, + 138, + 553, + 180 + ], + "type": "text", + "content": "[15] Aaron Hurst, Adam Lerer, Adam P Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihinda, Alan Hayes, Alec Radford, et al. Gpt-4o system card. 
arXiv preprint arXiv:2410.21276, 2024. 1, 2, 6" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ 316, 181, 553, 212 ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ 316, 181, 553, 212 ], + "spans": [ + { + "bbox": [ 316, 181, 553, 212 ], + "type": "text", + "content": "[16] Leslie Pack Kaelbling, Michael L Littman, and Andrew W Moore. Reinforcement learning: A survey. Journal of artificial intelligence research, 4:237-285, 1996. 3" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ 316, 213, 553, 255 ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ 316, 213, 553, 255 ], + "spans": [ + { + "bbox": [ 316, 213, 553, 255 ], + "type": "text", + "content": "[17] Xiang Lan, Feng Wu, Kai He, Qinghao Zhao, Shenda Hong, and Mengling Feng. Gem: Empowering mllm for grounded ecg understanding with time series and images. arXiv preprint arXiv:2503.06073, 2025. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ 316, 255, 553, 308 ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ 316, 255, 553, 308 ], + "spans": [ + { + "bbox": [ 316, 255, 553, 308 ], + "type": "text", + "content": "[18] Hugo Laurençon, Andrés Marafioti, Victor Sanh, and Léo Tronchon. Building and better understanding vision-language models: insights and future directions. In Workshop on Responsibly Building the Next Generation of Multimodal Foundational Models, 2024. 1, 2, 6" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ 316, 309, 553, 363 ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ 316, 309, 553, 363 ], + "spans": [ + { + "bbox": [ 316, 309, 553, 363 ], + "type": "text", + "content": "[19] Chunyuan Li, Cliff Wong, Sheng Zhang, Naoto Usuyama, Haotian Liu, Jianwei Yang, Tristan Naumann, Hoifung Poon, and Jianfeng Gao. Llava-med: Training a large language-and-vision assistant for biomedicine in one day. arXiv preprint arXiv:2306.00890, 2023. 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ 316, 363, 553, 405 ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ 316, 363, 553, 405 ], + "spans": [ + { + "bbox": [ 316, 363, 553, 405 ], + "type": "text", + "content": "[20] Haotian Liu, Chunyuan Li, Yuheng Li, Bo Li, Yuanhan Zhang, Sheng Shen, and Yong Jae Lee. Llava-next: Improved reasoning, OCR, and world knowledge, January 2024. 2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ 316, 406, 553, 437 ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ 316, 406, 553, 437 ], + "spans": [ + { + "bbox": [ 316, 406, 553, 437 ], + "type": "text", + "content": "[21] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. Advances in neural information processing systems, 36, 2024. 1, 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ 316, 437, 553, 480 ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ 316, 437, 553, 480 ], + "spans": [ + { + "bbox": [ 316, 437, 553, 480 ], + "type": "text", + "content": "[22] Yuliang Liu, Biao Yang, Qiang Liu, Zhang Li, Zhiyin Ma, Shuo Zhang, and Xiang Bai. Textmonkey: An OCR-free large multimodal model for understanding document. arXiv preprint arXiv:2403.04473, 2024. 
2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 316, + 481, + 553, + 534 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 481, + 553, + 534 + ], + "spans": [ + { + "bbox": [ + 316, + 481, + 553, + 534 + ], + "type": "text", + "content": "[23] Pan Lu, Hritik Bansal, Tony Xia, Jiacheng Liu, Chunyuan Li, Hannaneh Hajishirzi, Hao Cheng, Kai-Wei Chang, Michel Galley, and Jianfeng Gao. Mathvista: Evaluating mathematical reasoning of foundation models in visual contexts. arXiv preprint arXiv:2310.02255, 2023. 5" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 316, + 534, + 553, + 566 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 534, + 553, + 566 + ], + "spans": [ + { + "bbox": [ + 316, + 534, + 553, + 566 + ], + "type": "text", + "content": "[24] Trung Quoc Luong, Xinbo Zhang, Zhanming Jie, Peng Sun, Xiaoran Jin, and Hang Li. Reft: Reasoning with reinforced fine-tuning. arXiv preprint arXiv:2401.08967, 2024. 3" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 316, + 566, + 553, + 619 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 566, + 553, + 619 + ], + "spans": [ + { + "bbox": [ + 316, + 566, + 553, + 619 + ], + "type": "text", + "content": "[25] Chenyang Lyu, Minghao Wu, Longyue Wang, Xinting Huang, Bingshuai Liu, Zefeng Du, Shuming Shi, and Zhaopeng Tu. Macaw-llm: Multi-modal language modeling with image, audio, video, and text integration. arXiv preprint arXiv:2306.09093, 2023. 2" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 316, + 620, + 553, + 662 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 620, + 553, + 662 + ], + "spans": [ + { + "bbox": [ + 316, + 620, + 553, + 662 + ], + "type": "text", + "content": "[26] Ahmed Masry, Do Xuan Long, Jia Qing Tan, Shafiq Joty, and Enamul Hoque. Chartqa: A benchmark for question answering about charts with visual and logical reasoning. arXiv preprint arXiv:2203.10244, 2022. 5" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 316, + 662, + 553, + 706 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 662, + 553, + 706 + ], + "spans": [ + { + "bbox": [ + 316, + 662, + 553, + 706 + ], + "type": "text", + "content": "[27] Fanqing Meng, Lingxiao Du, Zongkai Liu, Zhixiang Zhou, Quanfeng Lu, Daocheng Fu, Tiancheng Han, Botian Shi, Wenhai Wang, Junjun He, et al. Mm-eureka: Exploring the frontiers of multimodal reasoning with rule-based reinforce" + } + ] + } + ], + "index": 29 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 72, + 295, + 705 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 77, + 72, + 285, + 83 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 72, + 285, + 83 + ], + "spans": [ + { + "bbox": [ + 77, + 72, + 285, + 83 + ], + "type": "text", + "content": "ment learning. arXiv preprint arXiv:2503.07365, 2025. 3" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 57, + 84, + 225, + 94 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 84, + 225, + 94 + ], + "spans": [ + { + "bbox": [ + 57, + 84, + 225, + 94 + ], + "type": "text", + "content": "[28] OpenAI. Gpt-4 technical report, 2023. 
3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 57, + 94, + 225, + 104 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 94, + 225, + 104 + ], + "spans": [ + { + "bbox": [ + 57, + 94, + 225, + 104 + ], + "type": "text", + "content": "[29] OpenAI. Introducing openai o1, 2024. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 104, + 295, + 157 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 104, + 295, + 157 + ], + "spans": [ + { + "bbox": [ + 56, + 104, + 295, + 157 + ], + "type": "text", + "content": "[30] Yingzhe Peng, Gongrui Zhang, Miaosen Zhang, Zhiyuan You, Jie Liu, Qipeng Zhu, Kai Yang, Xingzhong Xu, Xin Geng, and Xu Yang. Lmm-r1: Empowering 3b Imms with strong reasoning abilities through two-stage rule-based rl. arXiv preprint arXiv:2503.07536, 2025. 3, 6" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 57, + 157, + 295, + 190 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 157, + 295, + 190 + ], + "spans": [ + { + "bbox": [ + 57, + 157, + 295, + 190 + ], + "type": "text", + "content": "[31] Alec Radford, Karthik Narasimhan, Tim Salimans, Ilya Sutskever, et al. Improving language understanding by generative pre-training. 2018. 3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 190, + 295, + 243 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 190, + 295, + 243 + ], + "spans": [ + { + "bbox": [ + 56, + 190, + 295, + 243 + ], + "type": "text", + "content": "[32] Rafael Rafailov, Archit Sharma, Eric Mitchell, Christopher D Manning, Stefano Ermon, and Chelsea Finn. Direct preference optimization: Your language model is secretly a reward model. Advances in Neural Information Processing Systems, 36:53728-53741, 2023. 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 243, + 295, + 275 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 243, + 295, + 275 + ], + "spans": [ + { + "bbox": [ + 56, + 243, + 295, + 275 + ], + "type": "text", + "content": "[33] John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov. Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347, 2017. 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 275, + 295, + 328 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 275, + 295, + 328 + ], + "spans": [ + { + "bbox": [ + 56, + 275, + 295, + 328 + ], + "type": "text", + "content": "[34] Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300, 2024. 1, 3, 5" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 328, + 295, + 372 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 328, + 295, + 372 + ], + "spans": [ + { + "bbox": [ + 56, + 328, + 295, + 372 + ], + "type": "text", + "content": "[35] Guangzhi Sun, Wenyi Yu, Changli Tang, Xianzhao Chen, Tian Tan, Wei Li, Lu Lu, Zejun Ma, Yuxuan Wang, and Chao Zhang. video-salmonn: Speech-enhanced audio-visual large language models. arXiv preprint arXiv:2406.15704, 2024. 
2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 372, + 295, + 424 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 372, + 295, + 424 + ], + "spans": [ + { + "bbox": [ + 56, + 372, + 295, + 424 + ], + "type": "text", + "content": "[36] Kimi Team, Angang Du, Bofei Gao, Bowei Xing, Changjiu Jiang, Cheng Chen, Cheng Li, Chenjun Xiao, Chenzhuang Du, Chonghua Liao, et al. Kimi k1.5: Scaling reinforcement learning with llms. arXiv preprint arXiv:2501.12599, 2025. 1, 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 426, + 295, + 479 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 426, + 295, + 479 + ], + "spans": [ + { + "bbox": [ + 56, + 426, + 295, + 479 + ], + "type": "text", + "content": "[37] Omkar Thawakar, Dinura Dissanayake, Ketan More, Ritesh Thawkar, Ahmed Heakl, Noor Ahsan, Yuhao Li, Mohammed Zumri, Jean Lahoud, Rao Muhammad Anwer, et al. Llamav-o1: Rethinking step-by-step visual reasoning in llms. arXiv preprint arXiv:2501.06186, 2025. 1, 3, 6" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 479, + 295, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 479, + 295, + 533 + ], + "spans": [ + { + "bbox": [ + 56, + 479, + 295, + 533 + ], + "type": "text", + "content": "[38] Shengbang Tong, Ellis Brown, Penghao Wu, Sanghyun Woo, Manoj Middepogu, Sai Charitha Akula, Jihan Yang, Shusheng Yang, Adithya Iyer, Xichen Pan, et al. Cambrian-1: A fully open, vision-centric exploration of multimodal llms. arXiv preprint arXiv:2406.16860, 2024. 1, 2, 6" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 56, + 533, + 295, + 586 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 533, + 295, + 586 + ], + "spans": [ + { + "bbox": [ + 56, + 533, + 295, + 586 + ], + "type": "text", + "content": "[39] Leandro von Werra, Younes Belkada, Lewis Tunstall, Edward Beeching, Tristan Thrush, Nathan Lambert, Shengyi Huang, Kashif Rasul, and Quentin Gallouédec. Trl: Transformer reinforcement learning. https://github.com/huggingface/trl, 2020. 6" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 56, + 586, + 295, + 640 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 586, + 295, + 640 + ], + "spans": [ + { + "bbox": [ + 56, + 586, + 295, + 640 + ], + "type": "text", + "content": "[40] Ke Wang, Junting Pan, Weikang Shi, Zimu Lu, Houxing Ren, Aojun Zhou, Mingjie Zhan, and Hongsheng Li. Measuring multimodal mathematical reasoning with math-vision dataset. Advances in Neural Information Processing Systems, 37:95095-95169, 2025. 5" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 56, + 640, + 295, + 694 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 640, + 295, + 694 + ], + "spans": [ + { + "bbox": [ + 56, + 640, + 295, + 694 + ], + "type": "text", + "content": "[41] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, et al. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191, 2024. 
6" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 56, + 694, + 295, + 705 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 694, + 295, + 705 + ], + "spans": [ + { + "bbox": [ + 56, + 694, + 295, + 705 + ], + "type": "text", + "content": "[42] Shengqiong Wu, Hao Fei, Leigang Qu, Wei Ji, and Tat-Seng" + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 73, + 554, + 708 + ], + "type": "list", + "angle": 0, + "index": 30, + "blocks": [ + { + "bbox": [ + 333, + 73, + 554, + 94 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 333, + 73, + 554, + 94 + ], + "spans": [ + { + "bbox": [ + 333, + 73, + 554, + 94 + ], + "type": "text", + "content": "Chua. Next-gpt: Any-to-any multimodal llm. arXiv preprint arXiv:2309.05519, 2023. 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 316, + 95, + 554, + 159 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 95, + 554, + 159 + ], + "spans": [ + { + "bbox": [ + 316, + 95, + 554, + 159 + ], + "type": "text", + "content": "[43] Zhiyu Wu, Xiaokang Chen, Zizheng Pan, Xingchao Liu, Wen Liu, Damai Dai, Huazuo Gao, Yiyang Ma, Chengyue Wu, Bingxuan Wang, et al. Deepseek-vl2: Mixture-of-experts vision-language models for advanced multimodal understanding. arXiv preprint arXiv:2412.10302, 2024. 1, 2, 6" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 316, + 159, + 554, + 201 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 159, + 554, + 201 + ], + "spans": [ + { + "bbox": [ + 316, + 159, + 554, + 201 + ], + "type": "text", + "content": "[44] Guowei Xu, Peng Jin, Li Hao, Yibing Song, Lichao Sun, and Li Yuan. Llava-o1: Let vision language models reason step-by-step. arXiv preprint arXiv:2411.10440, 2024. 1, 3, 5, 6" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 316, + 201, + 554, + 256 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 201, + 554, + 256 + ], + "spans": [ + { + "bbox": [ + 316, + 201, + 554, + 256 + ], + "type": "text", + "content": "[45] Huanjin Yao, Jiaxing Huang, Yawen Qiu, Michael K Chen, Wenzheng Liu, Wei Zhang, Wenjie Zeng, Xikun Zhang, Jingyi Zhang, Yuxin Song, et al. MMreason: An open-ended multi-modal multi-step reasoning benchmark for mllms toward agi. arXiv preprint arXiv:2506.23563, 2025. 5" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 316, + 256, + 554, + 320 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 256, + 554, + 320 + ], + "spans": [ + { + "bbox": [ + 316, + 256, + 554, + 320 + ], + "type": "text", + "content": "[46] Huanjin Yao, Jiaxing Huang, Wenhao Wu, Jingyi Zhang, Yibo Wang, Shunyu Liu, Yingjie Wang, Yuxin Song, Haocheng Feng, Li Shen, et al. Mulberry: Empowering mllm with o1-like reasoning and reflection via collective monte carlo tree search. arXiv preprint arXiv:2412.18319, 2024. 1, 3, 5, 6" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 316, + 320, + 554, + 374 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 320, + 554, + 374 + ], + "spans": [ + { + "bbox": [ + 316, + 320, + 554, + 374 + ], + "type": "text", + "content": "[47] Huanjin Yao, Qixiang Yin, Jingyi Zhang, Min Yang, Yibo Wang, Wenhao Wu, Fei Su, Li Shen, Minghui Qiu, Dacheng Tao, et al. R1-sharevl: Incentivizing reasoning capability of multimodal large language models via share-grpo. arXiv preprint arXiv:2505.16673, 2025. 
3, 6" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 316, + 374, + 554, + 418 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 374, + 554, + 418 + ], + "spans": [ + { + "bbox": [ + 316, + 374, + 554, + 418 + ], + "type": "text", + "content": "[48] Yuan Yao, Tianyu Yu, Ao Zhang, Chongyi Wang, Junbo Cui, Hongji Zhu, Tianchi Cai, Haoyu Li, Weilin Zhao, Zhihui He, et al. Minicpm-v: A gpt-4v level mllm on your phone. arXiv preprint arXiv:2408.01800, 2024. 6" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 316, + 418, + 554, + 471 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 418, + 554, + 471 + ], + "spans": [ + { + "bbox": [ + 316, + 418, + 554, + 471 + ], + "type": "text", + "content": "[49] Jiabo Ye, Anwen Hu, Haiyang Xu, Qinghao Ye, Ming Yan, Yuhao Dan, Chenlin Zhao, Guohai Xu, Chenliang Li, Junfeng Tian, et al. mplug-docowl: Modularized multimodal large language model for document understanding. arXiv preprint arXiv:2307.02499, 2023. 2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 316, + 471, + 554, + 514 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 471, + 554, + 514 + ], + "spans": [ + { + "bbox": [ + 316, + 471, + 554, + 514 + ], + "type": "text", + "content": "[50] Dan Zhang, Sining Zhoubian, Ziniu Hu, Yisong Yue, Yuxiao Dong, and Jie Tang. Rest-mcts*: Llm self-training via process reward guided tree search. arXiv preprint arXiv:2406.03816, 2024. 3" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 316, + 514, + 554, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 514, + 554, + 567 + ], + "spans": [ + { + "bbox": [ + 316, + 514, + 554, + 567 + ], + "type": "text", + "content": "[51] Haotian Zhang, Mingfei Gao, Zhe Gan, Philipp Dufter, Nina Wenzel, Forrest Huang, Dhruti Shah, Xianzhi Du, Bowen Zhang, Yanghao Li, et al. Mm1. 5: Methods, analysis & insights from multimodal llm fine-tuning. arXiv preprint arXiv:2409.20566, 2024. 1, 2, 6" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 316, + 567, + 554, + 610 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 567, + 554, + 610 + ], + "spans": [ + { + "bbox": [ + 316, + 567, + 554, + 610 + ], + "type": "text", + "content": "[52] Jingyi Zhang, Jiaxing Huang, Sheng Jin, and Shijian Lu. Vision-language models for vision tasks: A survey. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2024. 2" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 316, + 610, + 554, + 654 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 610, + 554, + 654 + ], + "spans": [ + { + "bbox": [ + 316, + 610, + 554, + 654 + ], + "type": "text", + "content": "[53] Jingyi Zhang, Jiaxing Huang, Xiaoqin Zhang, Ling Shao, and Shijian Lu. Historical test-time prompt tuning for vision foundation models. Advances in Neural Information Processing Systems, 37:12872-12896, 2024. 2" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 316, + 654, + 554, + 708 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 654, + 554, + 708 + ], + "spans": [ + { + "bbox": [ + 316, + 654, + 554, + 708 + ], + "type": "text", + "content": "[54] Renrui Zhang, Dongzhi Jiang, Yichi Zhang, Haokun Lin, Ziyu Guo, Pengshuo Qiu, Aojun Zhou, Pan Lu, Kai-Wei Chang, Yu Qiao, et al. Mathverse: Does your multi-modal llm truly see the diagrams in visual math problems? In European Conference on Computer Vision, pages 169–186." 
+ } + ] + } + ], + "index": 29 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 77, + 72, + 141, + 83 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 72, + 141, + 83 + ], + "spans": [ + { + "bbox": [ + 77, + 72, + 141, + 83 + ], + "type": "text", + "content": "Springer, 2024. 5" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 57, + 84, + 294, + 233 + ], + "type": "list", + "angle": 0, + "index": 4, + "blocks": [ + { + "bbox": [ + 57, + 84, + 294, + 137 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 84, + 294, + 137 + ], + "spans": [ + { + "bbox": [ + 57, + 84, + 294, + 137 + ], + "type": "text", + "content": "[55] Ruohong Zhang, Bowen Zhang, Yanghao Li, Haotian Zhang, Zhiqing Sun, Zhe Gan, Yinfei Yang, Ruoming Pang, and Yiming Yang. Improve vision language model chain-of-thought reasoning. arXiv preprint arXiv:2410.16198, 2024. 1, 3, 6" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 57, + 138, + 294, + 180 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 138, + 294, + 180 + ], + "spans": [ + { + "bbox": [ + 57, + 138, + 294, + 180 + ], + "type": "text", + "content": "[56] Xiaoman Zhang, Chaoyi Wu, Ziheng Zhao, Weixiong Lin, Ya Zhang, Yanfeng Wang, and Weidi Xie. Pmc-vqa: Visual instruction tuning for medical visual question answering. arXiv preprint arXiv:2305.10415, 2023. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 57, + 181, + 294, + 233 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 181, + 294, + 233 + ], + "spans": [ + { + "bbox": [ + 57, + 181, + 294, + 233 + ], + "type": "text", + "content": "[57] Chengke Zou, Xingang Guo, Rui Yang, Junyu Zhang, Bin Hu, and Huan Zhang. Dynamath: A dynamic visual benchmark for evaluating mathematical reasoning robustness of vision language models. arXiv preprint arXiv:2411.00836, 2024. 5" + } + ] + } + ], + "index": 3 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2503_12xxx/2503.12952/bbaeb81a-2f2e-42c2-84ea-7ec1e03fb00a_content_list.json b/data/2025/2503_12xxx/2503.12952/bbaeb81a-2f2e-42c2-84ea-7ec1e03fb00a_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..2b743b0b0be1b53aafa131a68b86339b5cd7b15b --- /dev/null +++ b/data/2025/2503_12xxx/2503.12952/bbaeb81a-2f2e-42c2-84ea-7ec1e03fb00a_content_list.json @@ -0,0 +1,877 @@ +[ + { + "type": "text", + "text": "Performance Analysis and Industry Deployment of Post-Quantum Cryptography Algorithms", + "text_level": 1, + "bbox": [ + 88, + 63, + 911, + 131 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Elif Dicle Demir", + "bbox": [ + 171, + 152, + 302, + 167 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Electrical and Electronics Eng. 
Dept.", + "bbox": [ + 109, + 167, + 362, + 183 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Koç University", + "bbox": [ + 184, + 185, + 287, + 198 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Istanbul, Türkiye", + "bbox": [ + 174, + 200, + 292, + 214 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "elifdemir21@ku.edu.tr", + "bbox": [ + 156, + 215, + 310, + 228 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Buse Bilgin", + "bbox": [ + 452, + 152, + 544, + 167 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "6GEN Lab., Next-Gen R&D", + "bbox": [ + 403, + 169, + 594, + 181 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Network Technologies, Turkcell", + "bbox": [ + 392, + 184, + 604, + 198 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Istanbul, Türkiye", + "bbox": [ + 437, + 200, + 553, + 214 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "buse.bilgin@turkcell.com.tr", + "bbox": [ + 401, + 215, + 589, + 229 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Mehmet Cengiz Onbaşi", + "bbox": [ + 669, + 152, + 852, + 167 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Electrical and Electronics Eng. Dept.", + "bbox": [ + 633, + 167, + 885, + 183 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Koç University", + "bbox": [ + 709, + 185, + 812, + 199 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Istanbul, Türkiye", + "bbox": [ + 700, + 200, + 815, + 214 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "monbasli@ku.edu.tr", + "bbox": [ + 689, + 215, + 826, + 228 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract—As quantum computing advances, modern cryptographic standards face an existential threat, necessitating a transition to post-quantum cryptography (PQC). The National Institute of Standards and Technology (NIST) has selected CRYSTALS-Kyber and CRYSTALS-Dilithium as standardized PQC algorithms for secure key exchange and digital signatures, respectively. This study conducts a comprehensive performance analysis of these algorithms by benchmarking execution times across cryptographic operations such as key generation, encapsulation, decapsulation, signing, and verification. Additionally, the impact of AVX2 optimizations is evaluated to assess hardware acceleration benefits. Our findings demonstrate that Kyber and Dilithium achieve efficient execution times, outperforming classical cryptographic schemes such as RSA and ECDSA at equivalent security levels. Beyond technical performance, the real-world deployment of PQC introduces challenges in telecommunications networks, where large-scale infrastructure upgrades, interoperability with legacy systems, and regulatory constraints must be addressed. This paper examines the feasibility of PQC adoption in telecom environments, highlighting key transition challenges, security risks, and implementation strategies. Through industry case studies, we illustrate how telecom operators are integrating PQC into 5G authentication, subscriber identity protection, and secure communications. 
Our analysis provides insights into the computational trade-offs, deployment considerations, and standardization efforts shaping the future of quantum-safe cryptographic infrastructure.", + "bbox": [ + 73, + 273, + 491, + 613 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Index Terms—Post-Quantum Cryptography, CRYSTALS-Kyber, CRYSTALS-Dilithium, NIST Standardization, Telecommunications Security, Cryptographic Deployment, Quantum-Safe Networks.", + "bbox": [ + 73, + 614, + 491, + 665 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "I. INTRODUCTION", + "text_level": 1, + "bbox": [ + 215, + 676, + 349, + 691 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Modern cryptographic systems rely on the computational intractability of certain mathematical problems, such as integer factorization and discrete logarithms, to ensure the security of digital communication and data protection [1]. The advent of quantum computing poses a fundamental threat to modern cryptographic systems, as algorithms such as Shor's and Grover's exploit quantum parallelism to break widely used cryptographic primitives. Shor's algorithm efficiently factors large integers and solves the discrete logarithm problem, undermining the security of RSA and Elliptic Curve Cryptography(ECC), while Grover's algorithm accelerates brute-force attacks, significantly reducing the effective security of symmetric encryption schemes [2]. As research continues to refine quantum hardware, the urgency to transition towards", + "bbox": [ + 73, + 696, + 491, + 907 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "quantum-resistant cryptographic solutions has become a pressing concern.", + "bbox": [ + 503, + 273, + 921, + 303 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "To address these emerging threats, the National Institute of Standards and Technology (NIST) initiated the Post-Quantum Cryptography (PQC) Standardization process to develop cryptographic algorithms resilient to quantum threats. The evaluation criteria for candidate algorithms include security against both classical and quantum attacks, cost and performance efficiency, and implementation characteristics such as flexibility and resistance to side-channel attacks [3]. As a result of the NIST standardization process, CRYSTALS-Kyber and HQC were selected as key encapsulation mechanisms (KEMs), while CRYSTALS-Dilithium, Falcon, and SPHINCS+ were chosen as digital signature schemes due to their strong security foundations, computational efficiency, and real-world applicability. Kyber is a lattice-based KEM, while HQC is a code-based KEM, both ensuring secure key exchange over insecure communication channels. Similarly, Dilithium and Falcon are lattice-based digital signature schemes designed for message authenticity and integrity, whereas SPHINCS+ is a hash-based scheme. [4]", + "bbox": [ + 501, + 303, + 921, + 589 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "This study focuses on the performance evaluation of postquantum cryptographic algorithms, specifically Kyber and Dilithium, by benchmarking their execution times across key cryptographic operations. Given the critical role of computational efficiency in the real-world adoption of PQC, our analysis provides insights into their feasibility for practical deployment. 
Additionally, as the transition to quantum-safe cryptography involves not only technical performance but also industry-wide adoption challenges, we extend our study to include an industry perspective, assessing the implications of PQC deployment in telecommunications and broader enterprise environments.", + "bbox": [ + 501, + 590, + 921, + 772 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "II. TESTING METHODOLOGY AND ENVIRONMENT SETUP FOR PERFORMANCE ANALYSIS", + "text_level": 1, + "bbox": [ + 509, + 781, + 915, + 811 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "To understand their computational feasibility, we conducted a detailed performance analysis of Kyber and Dilithium under controlled benchmarking conditions. The performance of cryptographic algorithms is a critical factor in their real-world adoption, particularly in PQC, where computational efficiency directly impacts practical deployment in constrained", + "bbox": [ + 501, + 816, + 921, + 907 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2503.12952v2 [cs.CR] 31 Mar 2025", + "bbox": [ + 22, + 234, + 57, + 681 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "environments. This section presents a benchmarking study of Kyber and Dilithium, evaluating their execution time across key operations such as key generation, encapsulation, decapsulation, signing, and verification. Additionally, optimizations leveraging AVX2 vector instructions are examined to assess the impact of hardware acceleration on performance. Furthermore, we compare these PQC algorithms with widely used classical cryptographic schemes—Elliptic Curve Diffie-Hellman (ECDH), Elliptic Curve Digital Signature Algorithm (ECDSA), and RSA—to analyze the trade-offs in execution time and efficiency when transitioning to quantum-resistant cryptography.", + "bbox": [ + 73, + 61, + 491, + 243 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Each cryptographic operation was executed 1,000 times to ensure consistency, with median and average execution times recorded. The benchmarking methodology follows standard cryptographic evaluation practices, converting measured CPU cycles to execution time using a fixed $3.3\\mathrm{GHz}$ clock. We evaluated both reference and AVX2-optimized implementations of Kyber and Dilithium to assess the performance gains from vectorized instructions. Additionally, to compare PQC with classical cryptography, we tested ECDH, ECDSA, and RSA under the same conditions using OpenSSL libraries.", + "bbox": [ + 73, + 243, + 491, + 393 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "III. PERFORMANCE EVALUATION OF KYBER AND DILITHIUM", + "text_level": 1, + "bbox": [ + 107, + 404, + 459, + 433 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Table I presents performance metrics for Kyber, a key encapsulation mechanism (KEM). It includes the secret key (sk), public key (pk), and ciphertext (ct) sizes for different security levels, reflecting storage and transmission overhead. The listed cryptographic operations are key generation (gen), responsible for producing the key pair; encapsulation (enc), encrypting a shared secret using the recipient's public key; and decapsulation (dec), recovering the shared secret with the private key. Table II provides results for Dilithium, a digital signature scheme. It reports public key (pk) and signature (sig) sizes, which indicate storage costs for authentication. 
The benchmarked operations include key generation (gen), used to create the signing key pair; signing (sign), which generates digital signatures for message integrity; and verification (verify), ensuring the validity of signatures. The AVX2 speedup rate in Tables I and II represents the performance improvement of the AVX2-optimized implementation compared to the reference implementation. It is calculated as the ratio of execution times, indicating how many times faster the AVX2 implementation performs a given cryptographic operation. A higher speedup value signifies greater efficiency gains achieved through vectorized polynomial arithmetic in AVX2-enabled processors.", + "bbox": [ + 73, + 439, + 493, + 786 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "As indicated in Table I, the execution times of Kyber increase with higher security levels across all three operations: key generation, encapsulation, and decapsulation. Notably, Kyber-512 completes execution in $0.127\\mathrm{ms}$ , whereas Kyber-1024 requires $0.294\\mathrm{ms}$ , demonstrating the expected computational cost of increased cryptographic strength. However, the scaling is nonlinear, as the increase from Kyber-768 to Kyber-1024 is smaller than from Kyber-512 to Kyber-768.", + "bbox": [ + 73, + 786, + 491, + 907 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The AVX2 optimization significantly reduces execution time, yielding an average speedup of $5.98 \\times$ across different security levels. The most substantial gains occur in decapsulation, which is reduced by up to $6.65 \\times$ due to the vectorized polynomial arithmetic enabled by AVX2 instructions. This demonstrates that Kyber benefits greatly from parallelization, making it well-suited for optimized hardware implementations.", + "bbox": [ + 501, + 61, + 921, + 167 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Similarly, as shown in Table II, the execution time of Dilithium scales with security levels, with Dilithium-2 executing in 0.643 ms while Dilithium-5 requires 1.36 ms. Unlike Kyber, where operations are relatively balanced, Dilithium's signing step dominates execution time—accounting for over $60\\%$ of the total runtime in all security levels. This is due to the structured lattice sampling required for signature generation, which is inherently more computationally expensive than verification.", + "bbox": [ + 503, + 169, + 921, + 305 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The AVX2 speedup for Dilithium is lower than for Kyber $(4.8\\times$ on average), but still significant, particularly in the signing operation, which achieves up to a $5.83\\times$ reduction in execution time. The verification step sees the smallest speedup $(3.76\\times)$ , reflecting its already efficient nature. The results emphasize that while Dilithium is computationally heavier than Kyber, its AVX2-optimized variant brings notable efficiency improvements, making it feasible for real-world applications.", + "bbox": [ + 501, + 306, + 921, + 444 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Overall, the results in Tables I and II underscore the computational viability of Kyber and Dilithium, demonstrating that hardware optimizations (e.g., AVX2) significantly enhance performance. 
These findings highlight the practicality of post-quantum cryptography (PQC) deployment, as even without specialized hardware accelerators, Kyber and Dilithium achieve efficient execution times while maintaining high security.", + "bbox": [ + 501, + 446, + 923, + 566 + ], + "page_idx": 1 + }, + { + "type": "table", + "img_path": "images/9c0eacd157e54f2d5f632c6c7f77df6b42ee84b9d3b991b5c9429c568a4961ff.jpg", + "table_caption": [ + "TABLE I KEY AND CIPHERTEXT SIZES AND EXECUTION TIMES (IN MILLISECONDS) FOR ALL PARAMETER SETS OF KYBER." + ], + "table_footnote": [], + "table_body": "
KYBER 512
Sizes (Bytes) | Reference (ms) | AVX2 (ms) | AVX2 Speedup Rate
sk: 1632 | gen: 0.035 | gen: 0.007 | 5.00
pk: 800 | enc: 0.040 | enc: 0.007 | 5.71
ct: 768 | dec: 0.052 | dec: 0.008 | 6.50
Total | 0.127 | 0.022 | 5.77
KYBER 768
Sizes (Bytes) | Reference (ms) | AVX2 (ms) | AVX2 Speedup Rate
sk: 2400 | gen: 0.058 | gen: 0.011 | 5.27
pk: 1184 | enc: 0.063 | enc: 0.011 | 5.73
ct: 1088 | dec: 0.080 | dec: 0.012 | 6.67
Total | 0.201 | 0.034 | 5.91
KYBER 1024
Sizes (Bytes) | Reference (ms) | AVX2 (ms) | AVX2 Speedup Rate
sk: 3168 | gen: 0.089 | gen: 0.015 | 5.93
pk: 1568 | enc: 0.092 | enc: 0.015 | 6.13
ct: 1568 | dec: 0.113 | dec: 0.017 | 6.65
Total | 0.294 | 0.047 | 6.26
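The AVX2 speedup rate reported in Table I is simply the ratio of the reference execution time to the AVX2-optimized execution time. A minimal Python sketch of that calculation, using the Kyber-512 timings from the rows above (the dictionary and helper names are illustrative, not from the paper):

kyber512_reference_ms = {"gen": 0.035, "enc": 0.040, "dec": 0.052}
kyber512_avx2_ms = {"gen": 0.007, "enc": 0.007, "dec": 0.008}

def speedup(reference_ms, optimized_ms):
    # Ratio of execution times: how many times faster the AVX2 build runs.
    return reference_ms / optimized_ms

for op in kyber512_reference_ms:
    rate = speedup(kyber512_reference_ms[op], kyber512_avx2_ms[op])
    print(f"{op}: {rate:.2f}x")  # gen: 5.00x, enc: 5.71x, dec: 6.50x

The same ratio definition applies to the Dilithium rows in Table II below.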
", + "bbox": [ + 506, + 633, + 923, + 887 + ], + "page_idx": 1 + }, + { + "type": "table", + "img_path": "images/cd5e919056af76267e409d8fb1057479a9f46d5fb8d12f6435f7d32ac2f38b8c.jpg", + "table_caption": [ + "TABLE II PUBLIC KEY AND SIGNATURE SIZES AND EXECUTION TIMES (IN MILLSECONDS) FOR ALL PARAMETER SETS OF DILITHIUM." + ], + "table_footnote": [], + "table_body": "
DILITHIUM 2
Sizes (Bytes) | Reference (ms) | AVX2 (ms) | AVX2 Speedup Rate
pk: 1312 | gen: 0.094 | gen: 0.026 | 3.62
sig: 2420 | sign: 0.445 | sign: 0.077 | 5.78
- | verify: 0.104 | verify: 0.028 | 3.71
Total | 0.643 | 0.131 | 4.91
DILITHIUM 3
Sizes (Bytes) | Reference (ms) | AVX2 (ms) | AVX2 Speedup Rate
pk: 1952 | gen: 0.167 | gen: 0.045 | 3.71
sig: 3293 | sign: 0.665 | sign: 0.120 | 5.54
- | verify: 0.160 | verify: 0.045 | 3.56
Total | 0.992 | 0.210 | 4.73
DILITHIUM 5
Sizes (Bytes) | Reference (ms) | AVX2 (ms) | AVX2 Speedup Rate
pk: 2592 | gen: 0.253 | gen: 0.070 | 3.61
sig: 4595 | sign: 0.840 | sign: 0.144 | 5.83
- | verify: 0.267 | verify: 0.071 | 3.76
Total | 1.360 | 0.285 | 4.77
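Section II describes the procedure behind these timings: each operation is executed 1,000 times, measured CPU cycles are converted to execution time at a fixed 3.3 GHz clock, and median and average values are recorded. A hedged sketch of such a harness, with time.perf_counter standing in for a hardware cycle counter and a dummy workload in place of the actual Kyber/Dilithium calls (both stand-ins are assumptions, not the paper's code):

import statistics
import time

CLOCK_HZ = 3.3e9  # fixed 3.3 GHz clock used to convert cycles to time

def cycles_to_ms(cycles):
    return cycles / CLOCK_HZ * 1000.0

def read_cycle_counter():
    # Stand-in for a real cycle counter such as RDTSC; derives pseudo-cycles
    # from wall-clock time so the sketch runs anywhere.
    return time.perf_counter() * CLOCK_HZ

def benchmark(operation, iterations=1000):
    samples = []
    for _ in range(iterations):
        start = read_cycle_counter()
        operation()
        samples.append(read_cycle_counter() - start)
    times_ms = [cycles_to_ms(c) for c in samples]
    return statistics.median(times_ms), statistics.mean(times_ms)

median_ms, mean_ms = benchmark(lambda: sum(range(1000)))  # dummy workload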
", + "bbox": [ + 76, + 109, + 498, + 363 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "IV. PERFORMANCE COMPARISON: POST-QUANTUM CRYPTOGRAPHY VS. CLASSICAL CRYPTOGRAPHY", + "text_level": 1, + "bbox": [ + 101, + 393, + 464, + 422 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Table III presents a comparative analysis of execution times for post-quantum and classical cryptographic algorithms, evaluated under controlled conditions. Both PQC and classical schemes were tested at different security levels, measured in bits, to assess performance variations. The table includes cryptographic algorithms across multiple security configurations, ensuring a direct comparison of execution times. The evaluation focuses on total execution time, measured in milliseconds, to quantify computational cost across different cryptographic operations. While this analysis highlights execution speed, real-world deployment must also consider additional factors such as memory footprint, communication overhead, and hardware compatibility.", + "bbox": [ + 73, + 436, + 491, + 633 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Kyber exhibits notable performance advantages over both RSA and ECDH, which are commonly employed for key exchange. Kyber-512, offering 128-bit security, achieves execution times that are approximately three times faster than both RSA-2048 and ECDH(P-256), despite these classical schemes providing lower security guarantees. Even Kyber-1024, the most computationally expensive variant, maintains an execution time that is roughly three times faster than RSA-3072, which offers only 128-bit security. At equivalent security levels, Kyber consistently achieves faster execution times than ECDH(P-256, P-384, P-521) while also providing quantum resistance. These efficiency gains are attributed to Kyber's lattice-based cryptographic foundation, which relies on small polynomials and number-theoretic transforms (NTT) rather than large-number modular exponentiation. This mathematical structure enables faster key generation and encapsulation while maintaining strong security guarantees, particularly against quantum adversaries.", + "bbox": [ + 73, + 635, + 491, + 907 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Dilithium demonstrates significant computational advantages over ECDSA, a widely used classical digital signature scheme. At the 128-bit security level, Dilithium-2 executes signature operations approximately $20\\%$ faster than ECDSA(P-256), with the performance gap increasing at higher security levels. Dilithium-5, the highest-security variant, achieves nearly twice the execution speed of ECDSA(P-512) at the 256-bit security level. A distinct characteristic of Dilithium is that signature generation dominates execution time, accounting for over $60\\%$ of the total runtime, whereas ECDSA exhibits a more balanced distribution between signing and verification. This difference arises from Dilithium's structured lattice sampling, which, while computationally intensive, remains more efficient than ECDSA's elliptic curve discrete logarithm operations. Additionally, Dilithium's deterministic signature generation eliminates nonce-related vulnerabilities, a known weakness in ECDSA implementations.", + "bbox": [ + 501, + 61, + 921, + 318 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The results indicate that post-quantum cryptographic algorithms do not inherently introduce higher computational costs. 
On the contrary, Kyber and Dilithium frequently outperform classical cryptographic schemes at equivalent security levels. Kyber consistently demonstrates superior efficiency in key exchange operations compared to RSA and ECDH, even at its highest security configuration. Similarly, Dilithium provides a computationally efficient alternative to ECDSA, particularly as security levels increase. While Dilithium's signing operation remains computationally heavier than verification, it still surpasses ECDSA in signature generation across all tested configurations. These findings highlight the feasibility of transitioning to quantum-resistant cryptographic standards in practical applications, demonstrating that enhanced security can be achieved without compromising computational efficiency.", + "bbox": [ + 501, + 319, + 921, + 559 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "These performance findings highlight the computational feasibility of Kyber and Dilithium as post-quantum cryptographic solutions, demonstrating that quantum resistance does not necessarily come at the cost of execution efficiency. However, execution time is only one aspect of cryptographic feasibility. While our controlled benchmarking showed that Kyber and Dilithium outperform classical schemes in speed, these results were obtained under optimized and isolated conditions. Real-world deployment involves additional complexities, such as infrastructure constraints, interoperability with existing systems, and operational overhead, which can impact practical performance. Thus, while PQC shows strong computational efficiency, its large-scale adoption in telecom networks requires a broader evaluation, considering scalability, integration challenges, and regulatory compliance.", + "bbox": [ + 503, + 560, + 921, + 787 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "V. POST-QUANTUM CRYPTOGRAPHY IN TELECOMMUNICATIONS: CHALLENGES, IMPLEMENTATIONS, AND FUTURE OUTLOOK", + "text_level": 1, + "bbox": [ + 555, + 797, + 870, + 840 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Implementing PQC in telecommunications networks presents significant challenges. Telecom operators must upgrade complex, large-scale infrastructures that currently rely on classical encryption, all while maintaining service", + "bbox": [ + 503, + 845, + 921, + 907 + ], + "page_idx": 2 + }, + { + "type": "table", + "img_path": "images/bd69976a57eb5e31707407c28643d178dd46223bf5a751e795ecb0e0d3d78495.jpg", + "table_caption": [ + "TABLE III EXECUTION TIME COMPARISON OF POST-QUANTUM AND CLASSICAL CRYPTOGRAPHIC ALGORITHMS." + ], + "table_footnote": [], + "table_body": "
Algorithm | Security Level | Total Time (ms)
Kyber-512 | 128-bit | 0.127
Kyber-768 | 192-bit | 0.201
Kyber-1024 | 256-bit | 0.294
Dilithium-2 | 128-bit | 0.643
Dilithium-3 | 192-bit | 0.992
Dilithium-5 | 256-bit | 1.360
ECDSA(P-256) | 128-bit | 0.801
ECDSA(P-384) | 192-bit | 1.702
ECDSA(P-512) | 256-bit | 2.398
RSA-2048 | 112-bit | 0.324
RSA-3072 | 128-bit | 0.884
ECDH(P-256) | 128-bit | 0.102
ECDH(P-384) | 192-bit | 0.299
ECDH(P-521) | 256-bit | 0.903
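The classical baselines in Table III were measured with OpenSSL under the same conditions. As a rough point of comparison, the ECDSA(P-256) row can be approximated in Python with the third-party cryptography package; absolute numbers depend on the machine and will differ from the fixed 3.3 GHz cycle-count setup above, but the relative cost of signing plus verification is visible:

import time
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import ec

ITERATIONS = 1000
key = ec.generate_private_key(ec.SECP256R1())  # ECDSA(P-256), 128-bit level
message = b"benchmark message"

start = time.perf_counter()
for _ in range(ITERATIONS):
    signature = key.sign(message, ec.ECDSA(hashes.SHA256()))
    key.public_key().verify(signature, message, ec.ECDSA(hashes.SHA256()))

elapsed_ms = (time.perf_counter() - start) / ITERATIONS * 1000.0
print(f"ECDSA(P-256) sign+verify: {elapsed_ms:.3f} ms per iteration")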
", + "bbox": [ + 125, + 107, + 441, + 287 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "continuity. Key challenges include performance and latency impacts, compatibility with legacy systems, lack of finalized standards, resource and cost constraints, transitional security risks, and vendor readiness issues.", + "bbox": [ + 76, + 316, + 488, + 375 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "A. Challenges", + "text_level": 1, + "bbox": [ + 76, + 388, + 174, + 402 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "1) Performance Impact on Existing Infrastructure: PQC algorithms require more computational resources and larger key sizes than classical cryptography. Many schemes are at least an order of magnitude slower or produce larger keys and ciphertexts than RSA or ECC, straining network devices [5]. The increased size of PQC keys, signatures, and ciphertexts taxes bandwidth and memory-constrained hardware; for instance, an additional 1 KB in a TLS handshake can increase response time by $1.5\\%$ [6]. Latency-sensitive telecom applications, such as voice and video, may experience performance degradation due to longer cryptographic operations or larger handshake messages. Operators need to evaluate whether servers, routers, and HSMs can support the increased computational load of PQC, as many may require hardware upgrades specifically for PQC adoption. Especially in radio access networks (RANs) and customer devices with limited processing power, PQC's computational overhead and memory footprint pose a significant deployment challenge.", + "bbox": [ + 76, + 409, + 488, + 679 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2) Interoperability with Legacy Systems: During the transition, not all network elements and partner systems will upgrade to PQC at the same time, raising interoperability issues. If one system uses a PQC-based protocol but the communicating peer does not, secure connections cannot be established [7]. Many telecom protocols use a \"fail secure\" approach, meaning a PQC-enabled node could be cut off from legacy nodes that don't recognize the new algorithms. Due to the interconnected nature of telecom networks, a single non-upgraded component can block migration, creating deployment bottlenecks. A possible solution is hybrid cryptographic modes (combining classical and PQC algorithms), but this adds complexity and requires new protocol standards and careful validation, potentially slowing down the transition. To prevent network partitioning, telecom operators must ensure", + "bbox": [ + 76, + 681, + 488, + 906 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "PQC upgrades happen in sync across critical systems or remain backward-compatible.", + "bbox": [ + 508, + 64, + 918, + 90 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3) Standardization and Regulatory Concerns: The telecom industry is highly standardized and regulated, so PQC adoption hinges on mature standards and regulatory guidance. As of 2024, standards bodies like NIST are just publishing the first official PQC algorithm standards [8]. Until international standards (e.g., 3GPP, IETF, ETSI) incorporate PQC, telcos risk adopting interim solutions that might not be interoperable or compliant long-term. There is also regulatory pressure: governments and industry bodies are already setting timelines and mandates for quantum-safe transitions. For example, the U.S. 
National Security Agency's CNSA 2.0 mandates specific PQC algorithms, aiming all national security systems to be quantum-resistant by 2035. However, inconsistent national strategies pose challenges for global carriers, as many countries have only issued high-level guidance to \"start planning\" with few concrete standards yet. The absence of finalized telecom-specific PQC standards adds uncertainty, requiring operators to closely coordinate with standards organizations to ensure protocols like 5G authentication, IPsec, and TLS integrate PQC effectively.", + "bbox": [ + 506, + 92, + 918, + 393 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4) Cost and Resource Allocation: Upgrading a telecom operator's cryptographic infrastructure to PQC is costly and resource-intensive. Many legacy systems lack the processing power, memory, or bandwidth to support PQC, requiring replacement or retrofitting of equipment such as mobile devices, SIM cards, routers, and base stations. This represents a significant capital expense, with costs extending to PQC-capable HSMs, accelerator cards, software updates, staff training, testing, and parallel system operation during the transition. Smaller operators worry that only large carriers can afford early adoption, but as vendors integrate PQC into products, upgrade costs are expected to decrease. Nonetheless, operators need to allocate substantial resources for cryptographic inventory, upgrade planning, and continuous maintenance to ensure a smooth migration. The cost of inaction could be higher—a quantum-broken network may result in regulatory penalties and customer loss, making early investment crucial.", + "bbox": [ + 506, + 393, + 918, + 650 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "5) Security Risks and Transition Challenges: Transitioning to PQC raises security concerns, as these new algorithms have not been tested in real-world deployments for decades like RSA/ECC. There are risks of undiscovered weaknesses or implementation flaws, and some PQC candidates have already been found vulnerable to cryptanalysis and side-channel attacks during standardization. Ensuring side-channel resistance is critical—cryptographic operations must not leak secrets through timing, power, or memory access patterns. Additionally, PQC introduces complex key management and new failure modes; for example, some digital signature schemes require tracking one-time keys, complicating network authentication. Early deployments have exposed issues, such as network middleware and firewalls failing due to large key exchange messages. Misconfigurations, like hybrid mode errors or certificate management lapses, could introduce vulnerabilities. To mitigate these risks, telecom operators must conduct", + "bbox": [ + 506, + 651, + 918, + 906 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "extensive testing, use proven implementations, and ensure crypto-agility, allowing algorithm updates when needed.", + "bbox": [ + 76, + 63, + 488, + 92 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "6) Vendor Readiness and Supply Chain Considerations: Telecommunications relies on a vast network of vendors for hardware, software, and infrastructure, making PQC adoption a supply chain challenge. Many vendors await finalized standards before integrating PQC, and without support in critical components like SIM cards and routers, full migration is impossible. 
To address this, telecom operators are updating procurement policies, requiring vendors to support NIST-approved PQC algorithms and crypto-agility. Regulatory bodies may also mandate certification, potentially delaying availability. While some vendors are developing PQC-capable products, widespread readiness will take time. Effective supply chain management and early engagement with suppliers are essential to ensure smooth deployment, coordinated upgrades, and interoperability. Ultimately, achieving a quantum-safe telecom network requires industry-wide collaboration and careful planning.", + "bbox": [ + 76, + 92, + 488, + 349 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "B. Successful Implementations and Initiatives of PQC", + "text_level": 1, + "bbox": [ + 78, + 359, + 442, + 375 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Despite the challenges, there have been several successful implementations and trials of post-quantum cryptography in telecom contexts. Forward-thinking carriers and technology partners around the world have started to integrate PQC into test networks, demonstrating feasibility and gleaning best practices. Below are a few notable examples and case studies highlighting how PQC deployment is being approached in telecommunications:", + "bbox": [ + 76, + 378, + 488, + 497 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "1) SoftBank (Japan) – Hybrid PQC Network Trial: SoftBank Corp., a major mobile operator in Japan, partnered with SandboxAQ to test PQC algorithms in a live network environment. In 2023 they conducted a hybrid encryption trial, combining classical elliptic-curve cryptography with lattice-based post-quantum algorithms on live network traffic [9]. The results were encouraging: the hybrid quantum-safe approach was verified to work on existing 4G/5G infrastructure with minimal performance impact. SoftBank reported that lattice-based PQC algorithms (such as those later standardized by NIST) outperformed other quantum-safe alternatives in their tests, providing strong security with only marginal added latency [10]. By adopting a hybrid approach, SoftBank ensured interoperability with existing systems while enhancing security. Their phased deployment, from lab tests to real-world networks, demonstrated that careful algorithm selection and optimization can mitigate future quantum threats without major performance trade-offs. Collaboration with SandboxAQ helped streamline cryptographic inventory and regulatory compliance. SoftBank continues investing in PQC, positioning early adoption as a competitive advantage in secure telecom infrastructure.", + "bbox": [ + 76, + 500, + 488, + 830 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "2) SK Telecom (South Korea) - PQC in 5G Standalone Network: Another pioneering effort was led by SK Telecom (SKT) in South Korea, in collaboration with Thales. SKT and Thales carried out a groundbreaking test of postquantum cryptography in a real 5G standalone network environ-", + "bbox": [ + 76, + 832, + 488, + 907 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "ronment [11]. In this pilot, SKT deployed quantum-resistant encryption to secure subscriber identities and network traffic. They tested 5G USIM cards implementing the CRYSTALS-Kyber key encapsulation algorithm, ensuring authentication remains secure against quantum threats. 
The trial demonstrated seamless interoperability between PQC-protected SIMs and the core network, with encrypted calls proving quantum-safe communication. This deployment, one of the first PQC integrations in 5G, underscores the role of carrier-vendor partnerships and informs ongoing standards development.", + "bbox": [ + 506, + 61, + 919, + 212 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3) North American Carriers and Initiatives: U.S. and Canadian telecom operators are preparing for PQC, driven by government directives. AT&T plans to be \"quantum ready\" by 2025, with internal pilots testing PQC in VPNs and TLS. While large-scale deployments are pending, mandates for critical infrastructure are pushing adoption. In Canada and Europe, providers like Verizon, Rogers, Deutsche Telecom, and BT are engaged in research and industry collaborations, focusing on PQC for routing, customer data protection, and inter-carrier security. These efforts emphasize crypto-agility—ensuring networks can transition flexibly as standards evolve. Industry groups, including the GSMA Post-Quantum Telco Network Taskforce and 5G Americas, are developing best practices to guide telecom operators through PQC adoption.", + "bbox": [ + 506, + 213, + 919, + 424 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Across successful implementations, key best practices have emerged. Conducting a cryptographic inventory helps identify necessary upgrades, while pilot deployments in less constrained environments allow for manageable PQC integration. A hybrid approach, running PQC alongside classical encryption, has been widely adopted to maintain continuity. Vendor collaboration with SIM card providers, router manufacturers, and software vendors is crucial for early integration. These trials also confirm that early PQC deployment safeguards critical data from future quantum threats. While performance impacts are generally manageable with optimized algorithms, some operators have even improved network efficiency by modernizing legacy systems. Overall, these case studies demonstrate that with careful planning and phased execution, telcos can begin inserting quantum-resistant cryptography into their networks today, gaining experience and confidence for broader rollouts.", + "bbox": [ + 506, + 426, + 919, + 666 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "C. Future Outlook and Recommendations", + "text_level": 1, + "bbox": [ + 508, + 676, + 790, + 690 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The adoption of post-quantum cryptography (PQC) in telecom networks is shifting from isolated pilots to broader deployments as standards solidify and the quantum threat looms. Telecom operators must act now, as waiting until large-scale quantum computers emerge will be too late. Security organizations stress the urgency of conducting cryptographic inventories to identify where public-key cryptography is used—such as SIM authentication, SSL/TLS links, and PKI certificates—prioritizing critical assets to mitigate \"harvest now, decrypt later\" risks. Awareness and education are also crucial for leadership and technical teams.", + "bbox": [ + 506, + 696, + 919, + 861 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "A structured PQC implementation roadmap involves phased deployments, starting with hybrid cryptographic modes alongside classical encryption to maintain compatibility. 
Initial", + "bbox": [ + 508, + 862, + 919, + 907 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "transitions should focus on non-customer-facing segments, expanding as standards mature and interoperability improves. Operators must align migration plans with regulatory requirements, ensuring compliance with evolving mandates. From 2024 onward, telecom providers are expected to integrate PQC into technology refresh cycles, with PQC becoming a standard in 5G-Advanced and 6G networks by the late 2020s. The goal is to achieve full quantum resistance in critical infrastructure by the early 2030s.", + "bbox": [ + 73, + 61, + 491, + 196 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Within the next decade, PQC will likely be as integral to telecom security as TLS and IPsec are today. Once NIST and other bodies finalize standards by 2024-2025, adoption will accelerate, giving early adopters a competitive edge with \"quantum-safe\" services. Given the uncertainty of quantum computing advancements, proactive preparation is essential. Encouragingly, PQC adoption does not necessarily require hardware replacements—many transitions can be done via software updates, reducing costs. As vendors integrate PQC into products, expenses are expected to decrease further.", + "bbox": [ + 73, + 198, + 491, + 348 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "By 2030, much of global telecom traffic, particularly sensitive communications, will likely be encrypted using post-quantum or hybrid cryptographic schemes. Collaboration among telecom operators, governments, and the security community will be crucial for interoperability and resilience. With proactive planning and cooperative execution, the telecom industry can secure global communications against quantum threats while maintaining security, efficiency, and compliance.", + "bbox": [ + 73, + 348, + 490, + 470 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "VI. CONCLUSION", + "text_level": 1, + "bbox": [ + 217, + 474, + 346, + 488 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The transition to post-quantum cryptography (PQC) is no longer a theoretical consideration but an imminent necessity for securing digital communications against future quantum threats. This study has demonstrated that CRYSTALS-Kyber and CRYSTALS-Dilithium, the NIST-standardized PQC algorithms, not only provide robust quantum resistance but also achieve competitive execution times compared to classical cryptographic schemes. Benchmarking results highlight their computational efficiency, particularly when optimized with AVX2 vectorization.", + "bbox": [ + 73, + 492, + 490, + 642 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "However large-scale deployment in telecommunications networks introduces critical challenges, including infrastructure upgrades, interoperability concerns, regulatory compliance, and cost constraints. The successful implementation of PQC in telecom environments requires a structured, phased migration strategy, leveraging hybrid cryptographic approaches to maintain compatibility with legacy systems. 
Early industry trials demonstrate the viability of PQC adoption while emphasizing the importance of vendor collaboration, cryptographic agility, and thorough performance validation.", + "bbox": [ + 73, + 643, + 491, + 792 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Looking ahead, PQC is expected to become a fundamental component of telecom security, with adoption accelerating as", + "bbox": [ + 73, + 794, + 491, + 824 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "standards solidify and regulatory mandates take effect. As quantum computing advances remain unpredictable, proactive preparation is essential to mitigate risks associated with delayed migration. Encouragingly, the ongoing integration of PQC into security protocols for 5G and 6G networks, along with continued industry cooperation, ensures that telecom infrastructure remains resilient against emerging cryptographic threats. With careful planning and strategic execution, the transition to quantum-safe cryptography can safeguard telecom networks, ensuring their security and adaptability in the quantum era.", + "bbox": [ + 501, + 61, + 921, + 229 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "REFERENCES", + "text_level": 1, + "bbox": [ + 663, + 241, + 761, + 255 + ], + "page_idx": 5 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] D. Joseph, R. Misoczki, M. Manzano, J. Tricot, F. D. Pinuaga, O. Lacombe, S. Leichenauer, J. Hiday, P. Venables, and R. Hansen, \"Transitioning organizations to post-quantum cryptography,\" Nature, vol. 605, no. 7909, pp. 237–243, 2022.", + "[2] D. J. Bernstein and T. Lange, \"Post-quantum cryptography,\" Nature, vol. 549, no. 7671, pp. 188-194, 2017.", + "[3] G. Alagic, G. Alagic, J. Alperin-Sheriff, D. Apon, D. Cooper, Q. Dang, Y.-K. Liu, C. Miller, D. Moody, R. Peralta et al., \"Status report on the first round of the NIST post-quantum cryptography standardization process,\" 2019.", + "[4] National Institute of Standards and Technology, \"Post-Quantum Cryptography Standardization,\" 2024, accessed: 2024-03-17. [Online]. Available: https://csrc.nist.gov/projects/post-quantum-cryptography/selected-algorithm", + "[5] GSM Association, \"Post Quantum Cryptography - Guidelines for Telecom Use Cases,\" GSM Association, Technical Report PQ.03, February 2024, accessed: 2024-03-17. [Online]. Available: https://www.gsma.com/newsroom/wp-content/uploads/PQ.03-Post-Quantum-Cryptography-Guidelines-for-Telecom-Use-v1.0.pdf.", + "[6] PKI Consortium, \"Key takeaways of the PQC conference in Austin,\" January 30, 2025, accessed: 2025-03-17. [Online]. Available: https://pkic.org/2025/01/30/key-takeaways-of-the-pqc-conference-in-austin/.", + "[7] U. Government, \"Report on post-quantum cryptography,\" Government Report, The White House, Washington, D.C., Tech. Rep. REF PQC-Report FINAL Send, July 2024, presented to the Senate Committee on Homeland Security and Governmental Affairs and the House Committee on Oversight and Accountability. [Online]. Available: https://bidenwhitehouse.archives.gov/wp-content/uploads/2024/07/REF_PQC-Report_FINAL_Send.pdf", + "[8] J. Taaffe, \"Are telcos ready for a quantum leap?\" June 2023, accessed: March 17, 2025. [Online]. Available: https://inform.tmforum.org/features-and-opinion/are-tercos-making-a-quantum-leap.", + "[9] SoftBank Corp. and SandboxAQ, \"SoftBank Corp. 
and SandboxAQ to Jointly Implement Next-Generation Cryptosystem Resilient to Cyber Attacks from Quantum Computers,\" March 2022, press Release, accessed: March 17, 2025. [Online]. Available: https://www.sandboxaq.com/press/softbank-corp-and-sandbox-aq-to-jointly-implement next-generation-cryptosystem-resilient-to-cyber-attacks-from-quantum-computers.", + "[10] SoftBank Corp., \"SoftBank Corp. and SandboxAQ Jointly Verify Hybrid Mode Quantum-safe Technology,\" February 2023, blog Post, accessed: March 17, 2025. [Online]. Available: https://www.softbank.jp/en/corp/technology/research/story-event/008/.", + "[11] Thales Group and SK Telecom, \"Thales and SK Telecom: Pioneering Quantum-Resistant Cryptography for 5G Networks,\" 2024, accessed: March 17, 2025. [Online]. Available: https://www.thalesgroup.com/en/markets/digital-identity-and-security/mobile/5G-skt-post-quantum-user-case." + ], + "bbox": [ + 508, + 263, + 944, + 823 + ], + "page_idx": 5 + } +] \ No newline at end of file diff --git a/data/2025/2503_12xxx/2503.12952/bbaeb81a-2f2e-42c2-84ea-7ec1e03fb00a_model.json b/data/2025/2503_12xxx/2503.12952/bbaeb81a-2f2e-42c2-84ea-7ec1e03fb00a_model.json new file mode 100644 index 0000000000000000000000000000000000000000..471b836172ae88a36afe755f27acee6750eef05b --- /dev/null +++ b/data/2025/2503_12xxx/2503.12952/bbaeb81a-2f2e-42c2-84ea-7ec1e03fb00a_model.json @@ -0,0 +1,1004 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.236, + 0.058, + 0.683 + ], + "angle": 270, + "content": "arXiv:2503.12952v2 [cs.CR] 31 Mar 2025" + }, + { + "type": "title", + "bbox": [ + 0.089, + 0.064, + 0.912, + 0.132 + ], + "angle": 0, + "content": "Performance Analysis and Industry Deployment of Post-Quantum Cryptography Algorithms" + }, + { + "type": "text", + "bbox": [ + 0.173, + 0.153, + 0.303, + 0.168 + ], + "angle": 0, + "content": "Elif Dicle Demir" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.169, + 0.364, + 0.184 + ], + "angle": 0, + "content": "Electrical and Electronics Eng. Dept." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.186, + 0.289, + 0.199 + ], + "angle": 0, + "content": "Koç University" + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.201, + 0.293, + 0.215 + ], + "angle": 0, + "content": "Istanbul, Türkiye" + }, + { + "type": "text", + "bbox": [ + 0.158, + 0.217, + 0.312, + 0.229 + ], + "angle": 0, + "content": "elifdemir21@ku.edu.tr" + }, + { + "type": "text", + "bbox": [ + 0.453, + 0.153, + 0.545, + 0.168 + ], + "angle": 0, + "content": "Buse Bilgin" + }, + { + "type": "text", + "bbox": [ + 0.404, + 0.17, + 0.596, + 0.183 + ], + "angle": 0, + "content": "6GEN Lab., Next-Gen R&D" + }, + { + "type": "text", + "bbox": [ + 0.393, + 0.185, + 0.605, + 0.199 + ], + "angle": 0, + "content": "Network Technologies, Turkcell" + }, + { + "type": "text", + "bbox": [ + 0.438, + 0.201, + 0.555, + 0.215 + ], + "angle": 0, + "content": "Istanbul, Türkiye" + }, + { + "type": "text", + "bbox": [ + 0.403, + 0.217, + 0.591, + 0.23 + ], + "angle": 0, + "content": "buse.bilgin@turkcell.com.tr" + }, + { + "type": "text", + "bbox": [ + 0.67, + 0.153, + 0.853, + 0.168 + ], + "angle": 0, + "content": "Mehmet Cengiz Onbaşi" + }, + { + "type": "text", + "bbox": [ + 0.635, + 0.169, + 0.887, + 0.184 + ], + "angle": 0, + "content": "Electrical and Electronics Eng. Dept." 
+ }, + { + "type": "text", + "bbox": [ + 0.71, + 0.186, + 0.813, + 0.2 + ], + "angle": 0, + "content": "Koç University" + }, + { + "type": "text", + "bbox": [ + 0.701, + 0.201, + 0.816, + 0.215 + ], + "angle": 0, + "content": "Istanbul, Türkiye" + }, + { + "type": "text", + "bbox": [ + 0.691, + 0.217, + 0.827, + 0.229 + ], + "angle": 0, + "content": "monbasli@ku.edu.tr" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.275, + 0.493, + 0.614 + ], + "angle": 0, + "content": "Abstract—As quantum computing advances, modern cryptographic standards face an existential threat, necessitating a transition to post-quantum cryptography (PQC). The National Institute of Standards and Technology (NIST) has selected CRYSTALS-Kyber and CRYSTALS-Dilithium as standardized PQC algorithms for secure key exchange and digital signatures, respectively. This study conducts a comprehensive performance analysis of these algorithms by benchmarking execution times across cryptographic operations such as key generation, encapsulation, decapsulation, signing, and verification. Additionally, the impact of AVX2 optimizations is evaluated to assess hardware acceleration benefits. Our findings demonstrate that Kyber and Dilithium achieve efficient execution times, outperforming classical cryptographic schemes such as RSA and ECDSA at equivalent security levels. Beyond technical performance, the real-world deployment of PQC introduces challenges in telecommunications networks, where large-scale infrastructure upgrades, interoperability with legacy systems, and regulatory constraints must be addressed. This paper examines the feasibility of PQC adoption in telecom environments, highlighting key transition challenges, security risks, and implementation strategies. Through industry case studies, we illustrate how telecom operators are integrating PQC into 5G authentication, subscriber identity protection, and secure communications. Our analysis provides insights into the computational trade-offs, deployment considerations, and standardization efforts shaping the future of quantum-safe cryptographic infrastructure." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.615, + 0.492, + 0.666 + ], + "angle": 0, + "content": "Index Terms—Post-Quantum Cryptography, CRYSTALS-Kyber, CRYSTALS-Dilithium, NIST Standardization, Telecommunications Security, Cryptographic Deployment, Quantum-Safe Networks." + }, + { + "type": "title", + "bbox": [ + 0.217, + 0.678, + 0.35, + 0.692 + ], + "angle": 0, + "content": "I. INTRODUCTION" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.697, + 0.493, + 0.909 + ], + "angle": 0, + "content": "Modern cryptographic systems rely on the computational intractability of certain mathematical problems, such as integer factorization and discrete logarithms, to ensure the security of digital communication and data protection [1]. The advent of quantum computing poses a fundamental threat to modern cryptographic systems, as algorithms such as Shor's and Grover's exploit quantum parallelism to break widely used cryptographic primitives. Shor's algorithm efficiently factors large integers and solves the discrete logarithm problem, undermining the security of RSA and Elliptic Curve Cryptography(ECC), while Grover's algorithm accelerates brute-force attacks, significantly reducing the effective security of symmetric encryption schemes [2]. 
As research continues to refine quantum hardware, the urgency to transition towards" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.274, + 0.922, + 0.304 + ], + "angle": 0, + "content": "quantum-resistant cryptographic solutions has become a pressing concern." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.304, + 0.923, + 0.59 + ], + "angle": 0, + "content": "To address these emerging threats, the National Institute of Standards and Technology (NIST) initiated the Post-Quantum Cryptography (PQC) Standardization process to develop cryptographic algorithms resilient to quantum threats. The evaluation criteria for candidate algorithms include security against both classical and quantum attacks, cost and performance efficiency, and implementation characteristics such as flexibility and resistance to side-channel attacks [3]. As a result of the NIST standardization process, CRYSTALS-Kyber and HQC were selected as key encapsulation mechanisms (KEMs), while CRYSTALS-Dilithium, Falcon, and SPHINCS+ were chosen as digital signature schemes due to their strong security foundations, computational efficiency, and real-world applicability. Kyber is a lattice-based KEM, while HQC is a code-based KEM, both ensuring secure key exchange over insecure communication channels. Similarly, Dilithium and Falcon are lattice-based digital signature schemes designed for message authenticity and integrity, whereas SPHINCS+ is a hash-based scheme. [4]" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.592, + 0.923, + 0.773 + ], + "angle": 0, + "content": "This study focuses on the performance evaluation of postquantum cryptographic algorithms, specifically Kyber and Dilithium, by benchmarking their execution times across key cryptographic operations. Given the critical role of computational efficiency in the real-world adoption of PQC, our analysis provides insights into their feasibility for practical deployment. Additionally, as the transition to quantum-safe cryptography involves not only technical performance but also industry-wide adoption challenges, we extend our study to include an industry perspective, assessing the implications of PQC deployment in telecommunications and broader enterprise environments." + }, + { + "type": "title", + "bbox": [ + 0.51, + 0.782, + 0.916, + 0.812 + ], + "angle": 0, + "content": "II. TESTING METHODOLOGY AND ENVIRONMENT SETUP FOR PERFORMANCE ANALYSIS" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.817, + 0.922, + 0.909 + ], + "angle": 0, + "content": "To understand their computational feasibility, we conducted a detailed performance analysis of Kyber and Dilithium under controlled benchmarking conditions. The performance of cryptographic algorithms is a critical factor in their real-world adoption, particularly in PQC, where computational efficiency directly impacts practical deployment in constrained" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.074, + 0.063, + 0.493, + 0.244 + ], + "angle": 0, + "content": "environments. This section presents a benchmarking study of Kyber and Dilithium, evaluating their execution time across key operations such as key generation, encapsulation, decapsulation, signing, and verification. Additionally, optimizations leveraging AVX2 vector instructions are examined to assess the impact of hardware acceleration on performance. 
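As a concrete illustration of the controlled timing loop used in this kind of benchmark (a sketch only, not the authors' harness), the protocol of running each operation 1,000 times and reporting median and average execution times can be written in a few lines of Python. The lambda below is a placeholder for a real key-generation, encapsulation, or signing call, and `cycles_to_ms` mirrors the fixed 3.3 GHz cycle-to-time conversion described in this section:

```python
import time
from statistics import mean, median

CLOCK_HZ = 3.3e9    # fixed 3.3 GHz clock assumed when converting cycle counts
ITERATIONS = 1000   # each cryptographic operation is executed 1,000 times

def cycles_to_ms(cycles: float) -> float:
    """Convert a raw CPU-cycle count to milliseconds at the fixed clock."""
    return cycles / CLOCK_HZ * 1e3

def benchmark(op, iterations=ITERATIONS):
    """Time `op` repeatedly; return (median_ms, average_ms)."""
    samples = []
    for _ in range(iterations):
        start = time.perf_counter_ns()
        op()
        samples.append((time.perf_counter_ns() - start) / 1e6)
    return median(samples), mean(samples)

# Placeholder operation standing in for keygen/encaps/decaps/sign/verify:
med_ms, avg_ms = benchmark(lambda: sum(range(10_000)))
print(f"median {med_ms:.4f} ms, average {avg_ms:.4f} ms")
print(f"1e6 cycles at 3.3 GHz = {cycles_to_ms(1e6):.4f} ms")
```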
Furthermore, we compare these PQC algorithms with widely used classical cryptographic schemes—Elliptic Curve Diffie-Hellman (ECDH), Elliptic Curve Digital Signature Algorithm (ECDSA), and RSA—to analyze the trade-offs in execution time and efficiency when transitioning to quantum-resistant cryptography." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.244, + 0.493, + 0.395 + ], + "angle": 0, + "content": "Each cryptographic operation was executed 1,000 times to ensure consistency, with median and average execution times recorded. The benchmarking methodology follows standard cryptographic evaluation practices, converting measured CPU cycles to execution time using a fixed \\(3.3\\mathrm{GHz}\\) clock. We evaluated both reference and AVX2-optimized implementations of Kyber and Dilithium to assess the performance gains from vectorized instructions. Additionally, to compare PQC with classical cryptography, we tested ECDH, ECDSA, and RSA under the same conditions using OpenSSL libraries." + }, + { + "type": "title", + "bbox": [ + 0.108, + 0.405, + 0.46, + 0.434 + ], + "angle": 0, + "content": "III. PERFORMANCE EVALUATION OF KYBER AND DILITHIUM" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.44, + 0.495, + 0.787 + ], + "angle": 0, + "content": "Table I presents performance metrics for Kyber, a key encapsulation mechanism (KEM). It includes the secret key (sk), public key (pk), and ciphertext (ct) sizes for different security levels, reflecting storage and transmission overhead. The listed cryptographic operations are key generation (gen), responsible for producing the key pair; encapsulation (enc), encrypting a shared secret using the recipient's public key; and decapsulation (dec), recovering the shared secret with the private key. Table II provides results for Dilithium, a digital signature scheme. It reports public key (pk) and signature (sig) sizes, which indicate storage costs for authentication. The benchmarked operations include key generation (gen), used to create the signing key pair; signing (sign), which generates digital signatures for message integrity; and verification (verify), ensuring the validity of signatures. The AVX2 speedup rate in Tables I and II represents the performance improvement of the AVX2-optimized implementation compared to the reference implementation. It is calculated as the ratio of execution times, indicating how many times faster the AVX2 implementation performs a given cryptographic operation. A higher speedup value signifies greater efficiency gains achieved through vectorized polynomial arithmetic in AVX2-enabled processors." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.787, + 0.493, + 0.909 + ], + "angle": 0, + "content": "As indicated in Table I, the execution times of Kyber increase with higher security levels across all three operations: key generation, encapsulation, and decapsulation. Notably, Kyber-512 completes execution in \\(0.127\\mathrm{ms}\\), whereas Kyber-1024 requires \\(0.294\\mathrm{ms}\\), demonstrating the expected computational cost of increased cryptographic strength. However, the scaling is nonlinear, as the increase from Kyber-768 to Kyber-1024 is smaller than from Kyber-512 to Kyber-768." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.063, + 0.923, + 0.169 + ], + "angle": 0, + "content": "The AVX2 optimization significantly reduces execution time, yielding an average speedup of \\(5.98 \\times\\) across different security levels. 
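To make the speedup-rate metric concrete, the Kyber-512 rows of Table I can be reproduced with simple ratios (a sketch; the values are copied directly from the table):

```python
# Per-operation execution times (ms) for Kyber-512, taken from Table I.
reference = {"gen": 0.035, "enc": 0.040, "dec": 0.052}
avx2      = {"gen": 0.007, "enc": 0.007, "dec": 0.008}

# Speedup rate = reference time / AVX2 time for the same operation.
for op in reference:
    print(op, round(reference[op] / avx2[op], 2))   # gen 5.0, enc 5.71, dec 6.5

# Total-row speedup, matching the 5.77 reported for Kyber-512:
print("total", round(sum(reference.values()) / sum(avx2.values()), 2))
```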
The most substantial gains occur in decapsulation, which is reduced by up to \\(6.65 \\times\\) due to the vectorized polynomial arithmetic enabled by AVX2 instructions. This demonstrates that Kyber benefits greatly from parallelization, making it well-suited for optimized hardware implementations." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.17, + 0.923, + 0.306 + ], + "angle": 0, + "content": "Similarly, as shown in Table II, the execution time of Dilithium scales with security levels, with Dilithium-2 executing in 0.643 ms while Dilithium-5 requires 1.36 ms. Unlike Kyber, where operations are relatively balanced, Dilithium's signing step dominates execution time—accounting for over \\(60\\%\\) of the total runtime in all security levels. This is due to the structured lattice sampling required for signature generation, which is inherently more computationally expensive than verification." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.308, + 0.923, + 0.445 + ], + "angle": 0, + "content": "The AVX2 speedup for Dilithium is lower than for Kyber \\((4.8\\times\\) on average), but still significant, particularly in the signing operation, which achieves up to a \\(5.83\\times\\) reduction in execution time. The verification step sees the smallest speedup \\((3.76\\times)\\), reflecting its already efficient nature. The results emphasize that while Dilithium is computationally heavier than Kyber, its AVX2-optimized variant brings notable efficiency improvements, making it feasible for real-world applications." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.447, + 0.924, + 0.568 + ], + "angle": 0, + "content": "Overall, the results in Tables I and II underscore the computational viability of Kyber and Dilithium, demonstrating that hardware optimizations (e.g., AVX2) significantly enhance performance. These findings highlight the practicality of post-quantum cryptography (PQC) deployment, as even without specialized hardware accelerators, Kyber and Dilithium achieve efficient execution times while maintaining high security." + }, + { + "type": "table_caption", + "bbox": [ + 0.508, + 0.591, + 0.921, + 0.626 + ], + "angle": 0, + "content": "TABLEI KEY AND CIPHERTEXT SIZES AND EXECUTION TIMES (IN MILLSECONDS) FOR ALL PARAMETER SETS OF KYBER." + }, + { + "type": "table", + "bbox": [ + 0.507, + 0.635, + 0.924, + 0.888 + ], + "angle": 0, + "content": "
KYBER 512
Sizes (Bytes)Reference (ms)AVX2 (ms)AVX2 Speedup Rate
sk: 1632gen: 0.035gen: 0.0075.00
pk: 800enc: 0.040enc: 0.0075.71
ct: 768dec: 0.052dec: 0.0086.50
Total0.1270.0225.77
KYBER 768
Sizes (Bytes)Reference (ms)AVX2 (ms)AVX2 Speedup Rate
sk: 2400gen: 0.058gen: 0.0115.27
pk: 1184enc: 0.063enc: 0.0115.73
ct: 1088dec: 0.080dec: 0.0126.67
Total0.2010.0345.91
KYBER 1024
Sizes (Bytes)Reference (ms)AVX2 (ms)AVX2 Speedup Rate
sk: 3168gen: 0.089gen: 0.0155.93
pk: 1568enc: 0.092enc: 0.0156.13
ct: 1568dec: 0.113dec: 0.0176.65
Total0.2940.0476.26
" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.107, + 0.065, + 0.461, + 0.099 + ], + "angle": 0, + "content": "TABLE II PUBLIC KEY AND SIGNATURE SIZES AND EXECUTION TIMES (IN MILLISECONDS) FOR ALL PARAMETER SETS OF DILITHIUM." + }, + { + "type": "table", + "bbox": [ + 0.078, + 0.11, + 0.499, + 0.364 + ], + "angle": 0, + "content": "
DILITHIUM 2
Sizes (Bytes)Reference (ms)AVX2 (ms)AVX2 Speedup Rate
pk: 1312gen: 0.094gen: 0.0263.62
sig: 2420sign: 0.445sign: 0.0775.78
verify: 0.104verify: 0.0283.71
Total0.6430.1314.91
DILITHIUM 3
Sizes (Bytes)Reference (ms)AVX2 (ms)AVX2 Speedup Rate
pk: 1952gen: 0.167gen: 0.0453.71
sig: 3293sign: 0.665sign: 0.1205.54
verify: 0.160verify: 0.0453.56
Total0.9920.2104.73
DILITHIUM 5
Sizes (Bytes)Reference (ms)AVX2 (ms)AVX2 Speedup Rate
pk: 2592gen: 0.253gen: 0.0703.61
sig: 4595sign: 0.840sign: 0.1445.83
verify: 0.267verify: 0.0713.76
Total1.3600.2854.77
" + }, + { + "type": "title", + "bbox": [ + 0.102, + 0.394, + 0.465, + 0.424 + ], + "angle": 0, + "content": "IV. PERFORMANCE COMPARISON: POST-QUANTUM CRYPTOGRAPHY VS. CLASSICAL CRYPTOGRAPHY" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.438, + 0.492, + 0.635 + ], + "angle": 0, + "content": "Table III presents a comparative analysis of execution times for post-quantum and classical cryptographic algorithms, evaluated under controlled conditions. Both PQC and classical schemes were tested at different security levels, measured in bits, to assess performance variations. The table includes cryptographic algorithms across multiple security configurations, ensuring a direct comparison of execution times. The evaluation focuses on total execution time, measured in milliseconds, to quantify computational cost across different cryptographic operations. While this analysis highlights execution speed, real-world deployment must also consider additional factors such as memory footprint, communication overhead, and hardware compatibility." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.636, + 0.493, + 0.909 + ], + "angle": 0, + "content": "Kyber exhibits notable performance advantages over both RSA and ECDH, which are commonly employed for key exchange. Kyber-512, offering 128-bit security, achieves execution times that are approximately three times faster than both RSA-2048 and ECDH(P-256), despite these classical schemes providing lower security guarantees. Even Kyber-1024, the most computationally expensive variant, maintains an execution time that is roughly three times faster than RSA-3072, which offers only 128-bit security. At equivalent security levels, Kyber consistently achieves faster execution times than ECDH(P-256, P-384, P-521) while also providing quantum resistance. These efficiency gains are attributed to Kyber's lattice-based cryptographic foundation, which relies on small polynomials and number-theoretic transforms (NTT) rather than large-number modular exponentiation. This mathematical structure enables faster key generation and encapsulation while maintaining strong security guarantees, particularly against quantum adversaries." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.063, + 0.923, + 0.319 + ], + "angle": 0, + "content": "Dilithium demonstrates significant computational advantages over ECDSA, a widely used classical digital signature scheme. At the 128-bit security level, Dilithium-2 executes signature operations approximately \\(20\\%\\) faster than ECDSA(P-256), with the performance gap increasing at higher security levels. Dilithium-5, the highest-security variant, achieves nearly twice the execution speed of ECDSA(P-512) at the 256-bit security level. A distinct characteristic of Dilithium is that signature generation dominates execution time, accounting for over \\(60\\%\\) of the total runtime, whereas ECDSA exhibits a more balanced distribution between signing and verification. This difference arises from Dilithium's structured lattice sampling, which, while computationally intensive, remains more efficient than ECDSA's elliptic curve discrete logarithm operations. Additionally, Dilithium's deterministic signature generation eliminates nonce-related vulnerabilities, a known weakness in ECDSA implementations." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.32, + 0.923, + 0.56 + ], + "angle": 0, + "content": "The results indicate that post-quantum cryptographic algorithms do not inherently introduce higher computational costs. 
On the contrary, Kyber and Dilithium frequently outperform classical cryptographic schemes at equivalent security levels. Kyber consistently demonstrates superior efficiency in key exchange operations compared to RSA and ECDH, even at its highest security configuration. Similarly, Dilithium provides a computationally efficient alternative to ECDSA, particularly as security levels increase. While Dilithium's signing operation remains computationally heavier than verification, it still surpasses ECDSA in signature generation across all tested configurations. These findings highlight the feasibility of transitioning to quantum-resistant cryptographic standards in practical applications, demonstrating that enhanced security can be achieved without compromising computational efficiency." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.561, + 0.923, + 0.789 + ], + "angle": 0, + "content": "These performance findings highlight the computational feasibility of Kyber and Dilithium as post-quantum cryptographic solutions, demonstrating that quantum resistance does not necessarily come at the cost of execution efficiency. However, execution time is only one aspect of cryptographic feasibility. While our controlled benchmarking showed that Kyber and Dilithium outperform classical schemes in speed, these results were obtained under optimized and isolated conditions. Real-world deployment involves additional complexities, such as infrastructure constraints, interoperability with existing systems, and operational overhead, which can impact practical performance. Thus, while PQC shows strong computational efficiency, its large-scale adoption in telecom networks requires a broader evaluation, considering scalability, integration challenges, and regulatory compliance." + }, + { + "type": "title", + "bbox": [ + 0.556, + 0.798, + 0.871, + 0.842 + ], + "angle": 0, + "content": "V. POST-QUANTUM CRYPTOGRAPHY IN TELECOMMUNICATIONS: CHALLENGES, IMPLEMENTATIONS, AND FUTURE OUTLOOK" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.847, + 0.922, + 0.908 + ], + "angle": 0, + "content": "Implementing PQC in telecommunications networks presents significant challenges. Telecom operators must upgrade complex, large-scale infrastructures that currently rely on classical encryption, all while maintaining service" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.093, + 0.065, + 0.476, + 0.098 + ], + "angle": 0, + "content": "TABLE III EXECUTION TIME COMPARISON OF POST-QUANTUM AND CLASSICAL CRYPTOGRAPHIC ALGORITHMS." + }, + { + "type": "table", + "bbox": [ + 0.126, + 0.108, + 0.442, + 0.288 + ], + "angle": 0, + "content": "
AlgorithmSecurity LevelTotal Time (ms)
Kyber-512128-bit0.127
Kyber-768192-bit0.201
Kyber-1024256-bit0.294
Dilithium-2128-bit0.643
Dilithium-3192-bit0.992
Dilithium-5256-bit1.360
ECDSA(P-256)128-bit0.801
ECDSA(P-384)192-bit1.702
ECDSA(P-512)256-bit2.398
RSA-2048112-bit0.324
RSA-3072128-bit0.884
ECDH(P-256)128-bit0.102
ECDH(P-384)192-bit0.299
ECDH(P-521)256-bit0.903
" + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.317, + 0.49, + 0.375 + ], + "angle": 0, + "content": "continuity. Key challenges include performance and latency impacts, compatibility with legacy systems, lack of finalized standards, resource and cost constraints, transitional security risks, and vendor readiness issues." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.39, + 0.175, + 0.403 + ], + "angle": 0, + "content": "A. Challenges" + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.41, + 0.49, + 0.68 + ], + "angle": 0, + "content": "1) Performance Impact on Existing Infrastructure: PQC algorithms require more computational resources and larger key sizes than classical cryptography. Many schemes are at least an order of magnitude slower or produce larger keys and ciphertexts than RSA or ECC, straining network devices [5]. The increased size of PQC keys, signatures, and ciphertexts taxes bandwidth and memory-constrained hardware; for instance, an additional 1 KB in a TLS handshake can increase response time by \\(1.5\\%\\) [6]. Latency-sensitive telecom applications, such as voice and video, may experience performance degradation due to longer cryptographic operations or larger handshake messages. Operators need to evaluate whether servers, routers, and HSMs can support the increased computational load of PQC, as many may require hardware upgrades specifically for PQC adoption. Especially in radio access networks (RANs) and customer devices with limited processing power, PQC's computational overhead and memory footprint pose a significant deployment challenge." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.682, + 0.49, + 0.907 + ], + "angle": 0, + "content": "2) Interoperability with Legacy Systems: During the transition, not all network elements and partner systems will upgrade to PQC at the same time, raising interoperability issues. If one system uses a PQC-based protocol but the communicating peer does not, secure connections cannot be established [7]. Many telecom protocols use a \"fail secure\" approach, meaning a PQC-enabled node could be cut off from legacy nodes that don't recognize the new algorithms. Due to the interconnected nature of telecom networks, a single non-upgraded component can block migration, creating deployment bottlenecks. A possible solution is hybrid cryptographic modes (combining classical and PQC algorithms), but this adds complexity and requires new protocol standards and careful validation, potentially slowing down the transition. To prevent network partitioning, telecom operators must ensure" + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.065, + 0.919, + 0.092 + ], + "angle": 0, + "content": "PQC upgrades happen in sync across critical systems or remain backward-compatible." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.093, + 0.919, + 0.394 + ], + "angle": 0, + "content": "3) Standardization and Regulatory Concerns: The telecom industry is highly standardized and regulated, so PQC adoption hinges on mature standards and regulatory guidance. As of 2024, standards bodies like NIST are just publishing the first official PQC algorithm standards [8]. Until international standards (e.g., 3GPP, IETF, ETSI) incorporate PQC, telcos risk adopting interim solutions that might not be interoperable or compliant long-term. There is also regulatory pressure: governments and industry bodies are already setting timelines and mandates for quantum-safe transitions. For example, the U.S. 
National Security Agency's CNSA 2.0 mandates specific PQC algorithms, aiming all national security systems to be quantum-resistant by 2035. However, inconsistent national strategies pose challenges for global carriers, as many countries have only issued high-level guidance to \"start planning\" with few concrete standards yet. The absence of finalized telecom-specific PQC standards adds uncertainty, requiring operators to closely coordinate with standards organizations to ensure protocols like 5G authentication, IPsec, and TLS integrate PQC effectively." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.395, + 0.919, + 0.651 + ], + "angle": 0, + "content": "4) Cost and Resource Allocation: Upgrading a telecom operator's cryptographic infrastructure to PQC is costly and resource-intensive. Many legacy systems lack the processing power, memory, or bandwidth to support PQC, requiring replacement or retrofitting of equipment such as mobile devices, SIM cards, routers, and base stations. This represents a significant capital expense, with costs extending to PQC-capable HSMs, accelerator cards, software updates, staff training, testing, and parallel system operation during the transition. Smaller operators worry that only large carriers can afford early adoption, but as vendors integrate PQC into products, upgrade costs are expected to decrease. Nonetheless, operators need to allocate substantial resources for cryptographic inventory, upgrade planning, and continuous maintenance to ensure a smooth migration. The cost of inaction could be higher—a quantum-broken network may result in regulatory penalties and customer loss, making early investment crucial." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.652, + 0.919, + 0.907 + ], + "angle": 0, + "content": "5) Security Risks and Transition Challenges: Transitioning to PQC raises security concerns, as these new algorithms have not been tested in real-world deployments for decades like RSA/ECC. There are risks of undiscovered weaknesses or implementation flaws, and some PQC candidates have already been found vulnerable to cryptanalysis and side-channel attacks during standardization. Ensuring side-channel resistance is critical—cryptographic operations must not leak secrets through timing, power, or memory access patterns. Additionally, PQC introduces complex key management and new failure modes; for example, some digital signature schemes require tracking one-time keys, complicating network authentication. Early deployments have exposed issues, such as network middleware and firewalls failing due to large key exchange messages. Misconfigurations, like hybrid mode errors or certificate management lapses, could introduce vulnerabilities. To mitigate these risks, telecom operators must conduct" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.078, + 0.064, + 0.49, + 0.093 + ], + "angle": 0, + "content": "extensive testing, use proven implementations, and ensure crypto-agility, allowing algorithm updates when needed." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.093, + 0.49, + 0.35 + ], + "angle": 0, + "content": "6) Vendor Readiness and Supply Chain Considerations: Telecommunications relies on a vast network of vendors for hardware, software, and infrastructure, making PQC adoption a supply chain challenge. Many vendors await finalized standards before integrating PQC, and without support in critical components like SIM cards and routers, full migration is impossible. 
To address this, telecom operators are updating procurement policies, requiring vendors to support NIST-approved PQC algorithms and crypto-agility. Regulatory bodies may also mandate certification, potentially delaying availability. While some vendors are developing PQC-capable products, widespread readiness will take time. Effective supply chain management and early engagement with suppliers are essential to ensure smooth deployment, coordinated upgrades, and interoperability. Ultimately, achieving a quantum-safe telecom network requires industry-wide collaboration and careful planning." + }, + { + "type": "title", + "bbox": [ + 0.079, + 0.36, + 0.443, + 0.375 + ], + "angle": 0, + "content": "B. Successful Implementations and Initiatives of PQC" + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.379, + 0.49, + 0.498 + ], + "angle": 0, + "content": "Despite the challenges, there have been several successful implementations and trials of post-quantum cryptography in telecom contexts. Forward-thinking carriers and technology partners around the world have started to integrate PQC into test networks, demonstrating feasibility and gleaning best practices. Below are a few notable examples and case studies highlighting how PQC deployment is being approached in telecommunications:" + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.5, + 0.49, + 0.831 + ], + "angle": 0, + "content": "1) SoftBank (Japan) – Hybrid PQC Network Trial: SoftBank Corp., a major mobile operator in Japan, partnered with SandboxAQ to test PQC algorithms in a live network environment. In 2023 they conducted a hybrid encryption trial, combining classical elliptic-curve cryptography with lattice-based post-quantum algorithms on live network traffic [9]. The results were encouraging: the hybrid quantum-safe approach was verified to work on existing 4G/5G infrastructure with minimal performance impact. SoftBank reported that lattice-based PQC algorithms (such as those later standardized by NIST) outperformed other quantum-safe alternatives in their tests, providing strong security with only marginal added latency [10]. By adopting a hybrid approach, SoftBank ensured interoperability with existing systems while enhancing security. Their phased deployment, from lab tests to real-world networks, demonstrated that careful algorithm selection and optimization can mitigate future quantum threats without major performance trade-offs. Collaboration with SandboxAQ helped streamline cryptographic inventory and regulatory compliance. SoftBank continues investing in PQC, positioning early adoption as a competitive advantage in secure telecom infrastructure." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.833, + 0.49, + 0.908 + ], + "angle": 0, + "content": "2) SK Telecom (South Korea) - PQC in 5G Standalone Network: Another pioneering effort was led by SK Telecom (SKT) in South Korea, in collaboration with Thales. SKT and Thales carried out a groundbreaking test of postquantum cryptography in a real 5G standalone network environ-" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.063, + 0.92, + 0.213 + ], + "angle": 0, + "content": "ronment [11]. In this pilot, SKT deployed quantum-resistant encryption to secure subscriber identities and network traffic. They tested 5G USIM cards implementing the CRYSTALS-Kyber key encapsulation algorithm, ensuring authentication remains secure against quantum threats. 
The trial demonstrated seamless interoperability between PQC-protected SIMs and the core network, with encrypted calls proving quantum-safe communication. This deployment, one of the first PQC integrations in 5G, underscores the role of carrier-vendor partnerships and informs ongoing standards development." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.214, + 0.92, + 0.425 + ], + "angle": 0, + "content": "3) North American Carriers and Initiatives: U.S. and Canadian telecom operators are preparing for PQC, driven by government directives. AT&T plans to be \"quantum ready\" by 2025, with internal pilots testing PQC in VPNs and TLS. While large-scale deployments are pending, mandates for critical infrastructure are pushing adoption. In Canada and Europe, providers like Verizon, Rogers, Deutsche Telecom, and BT are engaged in research and industry collaborations, focusing on PQC for routing, customer data protection, and inter-carrier security. These efforts emphasize crypto-agility—ensuring networks can transition flexibly as standards evolve. Industry groups, including the GSMA Post-Quantum Telco Network Taskforce and 5G Americas, are developing best practices to guide telecom operators through PQC adoption." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.427, + 0.92, + 0.667 + ], + "angle": 0, + "content": "Across successful implementations, key best practices have emerged. Conducting a cryptographic inventory helps identify necessary upgrades, while pilot deployments in less constrained environments allow for manageable PQC integration. A hybrid approach, running PQC alongside classical encryption, has been widely adopted to maintain continuity. Vendor collaboration with SIM card providers, router manufacturers, and software vendors is crucial for early integration. These trials also confirm that early PQC deployment safeguards critical data from future quantum threats. While performance impacts are generally manageable with optimized algorithms, some operators have even improved network efficiency by modernizing legacy systems. Overall, these case studies demonstrate that with careful planning and phased execution, telcos can begin inserting quantum-resistant cryptography into their networks today, gaining experience and confidence for broader rollouts." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.677, + 0.791, + 0.691 + ], + "angle": 0, + "content": "C. Future Outlook and Recommendations" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.697, + 0.92, + 0.862 + ], + "angle": 0, + "content": "The adoption of post-quantum cryptography (PQC) in telecom networks is shifting from isolated pilots to broader deployments as standards solidify and the quantum threat looms. Telecom operators must act now, as waiting until large-scale quantum computers emerge will be too late. Security organizations stress the urgency of conducting cryptographic inventories to identify where public-key cryptography is used—such as SIM authentication, SSL/TLS links, and PKI certificates—prioritizing critical assets to mitigate \"harvest now, decrypt later\" risks. Awareness and education are also crucial for leadership and technical teams." + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.863, + 0.92, + 0.908 + ], + "angle": 0, + "content": "A structured PQC implementation roadmap involves phased deployments, starting with hybrid cryptographic modes alongside classical encryption to maintain compatibility. 
Initial" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.074, + 0.063, + 0.492, + 0.198 + ], + "angle": 0, + "content": "transitions should focus on non-customer-facing segments, expanding as standards mature and interoperability improves. Operators must align migration plans with regulatory requirements, ensuring compliance with evolving mandates. From 2024 onward, telecom providers are expected to integrate PQC into technology refresh cycles, with PQC becoming a standard in 5G-Advanced and 6G networks by the late 2020s. The goal is to achieve full quantum resistance in critical infrastructure by the early 2030s." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.199, + 0.492, + 0.349 + ], + "angle": 0, + "content": "Within the next decade, PQC will likely be as integral to telecom security as TLS and IPsec are today. Once NIST and other bodies finalize standards by 2024-2025, adoption will accelerate, giving early adopters a competitive edge with \"quantum-safe\" services. Given the uncertainty of quantum computing advancements, proactive preparation is essential. Encouragingly, PQC adoption does not necessarily require hardware replacements—many transitions can be done via software updates, reducing costs. As vendors integrate PQC into products, expenses are expected to decrease further." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.349, + 0.491, + 0.471 + ], + "angle": 0, + "content": "By 2030, much of global telecom traffic, particularly sensitive communications, will likely be encrypted using post-quantum or hybrid cryptographic schemes. Collaboration among telecom operators, governments, and the security community will be crucial for interoperability and resilience. With proactive planning and cooperative execution, the telecom industry can secure global communications against quantum threats while maintaining security, efficiency, and compliance." + }, + { + "type": "title", + "bbox": [ + 0.218, + 0.476, + 0.348, + 0.489 + ], + "angle": 0, + "content": "VI. CONCLUSION" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.493, + 0.491, + 0.643 + ], + "angle": 0, + "content": "The transition to post-quantum cryptography (PQC) is no longer a theoretical consideration but an imminent necessity for securing digital communications against future quantum threats. This study has demonstrated that CRYSTALS-Kyber and CRYSTALS-Dilithium, the NIST-standardized PQC algorithms, not only provide robust quantum resistance but also achieve competitive execution times compared to classical cryptographic schemes. Benchmarking results highlight their computational efficiency, particularly when optimized with AVX2 vectorization." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.645, + 0.492, + 0.794 + ], + "angle": 0, + "content": "However large-scale deployment in telecommunications networks introduces critical challenges, including infrastructure upgrades, interoperability concerns, regulatory compliance, and cost constraints. The successful implementation of PQC in telecom environments requires a structured, phased migration strategy, leveraging hybrid cryptographic approaches to maintain compatibility with legacy systems. Early industry trials demonstrate the viability of PQC adoption while emphasizing the importance of vendor collaboration, cryptographic agility, and thorough performance validation." 
+ }, + { + "type": "text", + "bbox": [ + 0.074, + 0.795, + 0.492, + 0.825 + ], + "angle": 0, + "content": "Looking ahead, PQC is expected to become a fundamental component of telecom security, with adoption accelerating as" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.063, + 0.923, + 0.23 + ], + "angle": 0, + "content": "standards solidify and regulatory mandates take effect. As quantum computing advances remain unpredictable, proactive preparation is essential to mitigate risks associated with delayed migration. Encouragingly, the ongoing integration of PQC into security protocols for 5G and 6G networks, along with continued industry cooperation, ensures that telecom infrastructure remains resilient against emerging cryptographic threats. With careful planning and strategic execution, the transition to quantum-safe cryptography can safeguard telecom networks, ensuring their security and adaptability in the quantum era." + }, + { + "type": "title", + "bbox": [ + 0.665, + 0.242, + 0.762, + 0.256 + ], + "angle": 0, + "content": "REFERENCES" + }, + { + "type": "ref_text", + "bbox": [ + 0.515, + 0.265, + 0.921, + 0.311 + ], + "angle": 0, + "content": "[1] D. Joseph, R. Misoczki, M. Manzano, J. Tricot, F. D. Pinuaga, O. Lacombe, S. Leichenauer, J. Hiday, P. Venables, and R. Hansen, \"Transitioning organizations to post-quantum cryptography,\" Nature, vol. 605, no. 7909, pp. 237–243, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.515, + 0.312, + 0.921, + 0.334 + ], + "angle": 0, + "content": "[2] D. J. Bernstein and T. Lange, \"Post-quantum cryptography,\" Nature, vol. 549, no. 7671, pp. 188-194, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.335, + 0.921, + 0.379 + ], + "angle": 0, + "content": "[3] G. Alagic, G. Alagic, J. Alperin-Sheriff, D. Apon, D. Cooper, Q. Dang, Y.-K. Liu, C. Miller, D. Moody, R. Peralta et al., \"Status report on the first round of the NIST post-quantum cryptography standardization process,\" 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.38, + 0.921, + 0.426 + ], + "angle": 0, + "content": "[4] National Institute of Standards and Technology, \"Post-Quantum Cryptography Standardization,\" 2024, accessed: 2024-03-17. [Online]. Available: https://csrc.nist.gov/projects/post-quantum-cryptography/selected-algorithm" + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.426, + 0.921, + 0.482 + ], + "angle": 0, + "content": "[5] GSM Association, \"Post Quantum Cryptography - Guidelines for Telecom Use Cases,\" GSM Association, Technical Report PQ.03, February 2024, accessed: 2024-03-17. [Online]. Available: https://www.gsma.com/newsroom/wp-content/uploads/PQ.03-Post-Quantum-Cryptography-Guidelines-for-Telecom-Use-v1.0.pdf." + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.483, + 0.945, + 0.516 + ], + "angle": 0, + "content": "[6] PKI Consortium, \"Key takeaways of the PQC conference in Austin,\" January 30, 2025, accessed: 2025-03-17. [Online]. Available: https://pkic.org/2025/01/30/key-takeaways-of-the-pqc-conference-in-austin/." + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.517, + 0.921, + 0.596 + ], + "angle": 0, + "content": "[7] U. Government, \"Report on post-quantum cryptography,\" Government Report, The White House, Washington, D.C., Tech. Rep. REF PQC-Report FINAL Send, July 2024, presented to the Senate Committee on Homeland Security and Governmental Affairs and the House Committee on Oversight and Accountability. [Online]. 
Available: https://bidenwhitehouse.archives.gov/wp-content/uploads/2024/07/REF_PQC-Report_FINAL_Send.pdf" + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.597, + 0.921, + 0.642 + ], + "angle": 0, + "content": "[8] J. Taaffe, \"Are telcos ready for a quantum leap?\" June 2023, accessed: March 17, 2025. [Online]. Available: https://inform.tmforum.org/features-and-opinion/are-tercos-making-a-quantum-leap." + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.643, + 0.921, + 0.72 + ], + "angle": 0, + "content": "[9] SoftBank Corp. and SandboxAQ, \"SoftBank Corp. and SandboxAQ to Jointly Implement Next-Generation Cryptosystem Resilient to Cyber Attacks from Quantum Computers,\" March 2022, press Release, accessed: March 17, 2025. [Online]. Available: https://www.sandboxaq.com/press/softbank-corp-and-sandbox-aq-to-jointly-implement next-generation-cryptosystem-resilient-to-cyber-attacks-from-quantum-computers." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.721, + 0.921, + 0.767 + ], + "angle": 0, + "content": "[10] SoftBank Corp., \"SoftBank Corp. and SandboxAQ Jointly Verify Hybrid Mode Quantum-safe Technology,\" February 2023, blog Post, accessed: March 17, 2025. [Online]. Available: https://www.softbank.jp/en/corp/technology/research/story-event/008/." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.767, + 0.921, + 0.824 + ], + "angle": 0, + "content": "[11] Thales Group and SK Telecom, \"Thales and SK Telecom: Pioneering Quantum-Resistant Cryptography for 5G Networks,\" 2024, accessed: March 17, 2025. [Online]. Available: https://www.thalesgroup.com/en/markets/digital-identity-and-security/mobile/5G-skt-post-quantum-user-case." + }, + { + "type": "list", + "bbox": [ + 0.509, + 0.265, + 0.945, + 0.824 + ], + "angle": 0, + "content": null + } + ] +] \ No newline at end of file diff --git a/data/2025/2503_12xxx/2503.12952/bbaeb81a-2f2e-42c2-84ea-7ec1e03fb00a_origin.pdf b/data/2025/2503_12xxx/2503.12952/bbaeb81a-2f2e-42c2-84ea-7ec1e03fb00a_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..8c9843008e0e4cd5acf28c15ee85ab34c3465450 --- /dev/null +++ b/data/2025/2503_12xxx/2503.12952/bbaeb81a-2f2e-42c2-84ea-7ec1e03fb00a_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:df3f69ac08c6f23f86d8e84d7001a87077ea37f272f0609414bfdb0bf470d8c1 +size 105041 diff --git a/data/2025/2503_12xxx/2503.12952/full.md b/data/2025/2503_12xxx/2503.12952/full.md new file mode 100644 index 0000000000000000000000000000000000000000..173d23c7b749bbc1d8c59173508adf50c8cf9527 --- /dev/null +++ b/data/2025/2503_12xxx/2503.12952/full.md @@ -0,0 +1,165 @@ +# Performance Analysis and Industry Deployment of Post-Quantum Cryptography Algorithms + +Elif Dicle Demir + +Electrical and Electronics Eng. Dept. + +Koç University + +Istanbul, Türkiye + +elifdemir21@ku.edu.tr + +Buse Bilgin + +6GEN Lab., Next-Gen R&D + +Network Technologies, Turkcell + +Istanbul, Türkiye + +buse.bilgin@turkcell.com.tr + +Mehmet Cengiz Onbaşi + +Electrical and Electronics Eng. Dept. + +Koç University + +Istanbul, Türkiye + +monbasli@ku.edu.tr + +Abstract—As quantum computing advances, modern cryptographic standards face an existential threat, necessitating a transition to post-quantum cryptography (PQC). The National Institute of Standards and Technology (NIST) has selected CRYSTALS-Kyber and CRYSTALS-Dilithium as standardized PQC algorithms for secure key exchange and digital signatures, respectively. 
This study conducts a comprehensive performance analysis of these algorithms by benchmarking execution times across cryptographic operations such as key generation, encapsulation, decapsulation, signing, and verification. Additionally, the impact of AVX2 optimizations is evaluated to assess hardware acceleration benefits. Our findings demonstrate that Kyber and Dilithium achieve efficient execution times, outperforming classical cryptographic schemes such as RSA and ECDSA at equivalent security levels. Beyond technical performance, the real-world deployment of PQC introduces challenges in telecommunications networks, where large-scale infrastructure upgrades, interoperability with legacy systems, and regulatory constraints must be addressed. This paper examines the feasibility of PQC adoption in telecom environments, highlighting key transition challenges, security risks, and implementation strategies. Through industry case studies, we illustrate how telecom operators are integrating PQC into 5G authentication, subscriber identity protection, and secure communications. Our analysis provides insights into the computational trade-offs, deployment considerations, and standardization efforts shaping the future of quantum-safe cryptographic infrastructure. + +Index Terms—Post-Quantum Cryptography, CRYSTALS-Kyber, CRYSTALS-Dilithium, NIST Standardization, Telecommunications Security, Cryptographic Deployment, Quantum-Safe Networks. + +# I. INTRODUCTION + +Modern cryptographic systems rely on the computational intractability of certain mathematical problems, such as integer factorization and discrete logarithms, to ensure the security of digital communication and data protection [1]. The advent of quantum computing poses a fundamental threat to modern cryptographic systems, as algorithms such as Shor's and Grover's exploit quantum parallelism to break widely used cryptographic primitives. Shor's algorithm efficiently factors large integers and solves the discrete logarithm problem, undermining the security of RSA and Elliptic Curve Cryptography(ECC), while Grover's algorithm accelerates brute-force attacks, significantly reducing the effective security of symmetric encryption schemes [2]. As research continues to refine quantum hardware, the urgency to transition towards + +quantum-resistant cryptographic solutions has become a pressing concern. + +To address these emerging threats, the National Institute of Standards and Technology (NIST) initiated the Post-Quantum Cryptography (PQC) Standardization process to develop cryptographic algorithms resilient to quantum threats. The evaluation criteria for candidate algorithms include security against both classical and quantum attacks, cost and performance efficiency, and implementation characteristics such as flexibility and resistance to side-channel attacks [3]. As a result of the NIST standardization process, CRYSTALS-Kyber and HQC were selected as key encapsulation mechanisms (KEMs), while CRYSTALS-Dilithium, Falcon, and SPHINCS+ were chosen as digital signature schemes due to their strong security foundations, computational efficiency, and real-world applicability. Kyber is a lattice-based KEM, while HQC is a code-based KEM, both ensuring secure key exchange over insecure communication channels. Similarly, Dilithium and Falcon are lattice-based digital signature schemes designed for message authenticity and integrity, whereas SPHINCS+ is a hash-based scheme. 
[4] + +This study focuses on the performance evaluation of postquantum cryptographic algorithms, specifically Kyber and Dilithium, by benchmarking their execution times across key cryptographic operations. Given the critical role of computational efficiency in the real-world adoption of PQC, our analysis provides insights into their feasibility for practical deployment. Additionally, as the transition to quantum-safe cryptography involves not only technical performance but also industry-wide adoption challenges, we extend our study to include an industry perspective, assessing the implications of PQC deployment in telecommunications and broader enterprise environments. + +# II. TESTING METHODOLOGY AND ENVIRONMENT SETUP FOR PERFORMANCE ANALYSIS + +To understand their computational feasibility, we conducted a detailed performance analysis of Kyber and Dilithium under controlled benchmarking conditions. The performance of cryptographic algorithms is a critical factor in their real-world adoption, particularly in PQC, where computational efficiency directly impacts practical deployment in constrained + +environments. This section presents a benchmarking study of Kyber and Dilithium, evaluating their execution time across key operations such as key generation, encapsulation, decapsulation, signing, and verification. Additionally, optimizations leveraging AVX2 vector instructions are examined to assess the impact of hardware acceleration on performance. Furthermore, we compare these PQC algorithms with widely used classical cryptographic schemes—Elliptic Curve Diffie-Hellman (ECDH), Elliptic Curve Digital Signature Algorithm (ECDSA), and RSA—to analyze the trade-offs in execution time and efficiency when transitioning to quantum-resistant cryptography. + +Each cryptographic operation was executed 1,000 times to ensure consistency, with median and average execution times recorded. The benchmarking methodology follows standard cryptographic evaluation practices, converting measured CPU cycles to execution time using a fixed $3.3\mathrm{GHz}$ clock. We evaluated both reference and AVX2-optimized implementations of Kyber and Dilithium to assess the performance gains from vectorized instructions. Additionally, to compare PQC with classical cryptography, we tested ECDH, ECDSA, and RSA under the same conditions using OpenSSL libraries. + +# III. PERFORMANCE EVALUATION OF KYBER AND DILITHIUM + +Table I presents performance metrics for Kyber, a key encapsulation mechanism (KEM). It includes the secret key (sk), public key (pk), and ciphertext (ct) sizes for different security levels, reflecting storage and transmission overhead. The listed cryptographic operations are key generation (gen), responsible for producing the key pair; encapsulation (enc), encrypting a shared secret using the recipient's public key; and decapsulation (dec), recovering the shared secret with the private key. Table II provides results for Dilithium, a digital signature scheme. It reports public key (pk) and signature (sig) sizes, which indicate storage costs for authentication. The benchmarked operations include key generation (gen), used to create the signing key pair; signing (sign), which generates digital signatures for message integrity; and verification (verify), ensuring the validity of signatures. The AVX2 speedup rate in Tables I and II represents the performance improvement of the AVX2-optimized implementation compared to the reference implementation. 
It is calculated as the ratio of execution times, indicating how many times faster the AVX2 implementation performs a given cryptographic operation. A higher speedup value signifies greater efficiency gains achieved through vectorized polynomial arithmetic in AVX2-enabled processors. + +As indicated in Table I, the execution times of Kyber increase with higher security levels across all three operations: key generation, encapsulation, and decapsulation. Notably, Kyber-512 completes execution in $0.127\mathrm{ms}$ , whereas Kyber-1024 requires $0.294\mathrm{ms}$ , demonstrating the expected computational cost of increased cryptographic strength. However, the scaling is nonlinear, as the increase from Kyber-768 to Kyber-1024 is smaller than from Kyber-512 to Kyber-768. + +The AVX2 optimization significantly reduces execution time, yielding an average speedup of $5.98 \times$ across different security levels. The most substantial gains occur in decapsulation, which is reduced by up to $6.65 \times$ due to the vectorized polynomial arithmetic enabled by AVX2 instructions. This demonstrates that Kyber benefits greatly from parallelization, making it well-suited for optimized hardware implementations. + +Similarly, as shown in Table II, the execution time of Dilithium scales with security levels, with Dilithium-2 executing in 0.643 ms while Dilithium-5 requires 1.36 ms. Unlike Kyber, where operations are relatively balanced, Dilithium's signing step dominates execution time—accounting for over $60\%$ of the total runtime in all security levels. This is due to the structured lattice sampling required for signature generation, which is inherently more computationally expensive than verification. + +The AVX2 speedup for Dilithium is lower than for Kyber $(4.8\times$ on average), but still significant, particularly in the signing operation, which achieves up to a $5.83\times$ reduction in execution time. The verification step sees the smallest speedup $(3.76\times)$ , reflecting its already efficient nature. The results emphasize that while Dilithium is computationally heavier than Kyber, its AVX2-optimized variant brings notable efficiency improvements, making it feasible for real-world applications. + +Overall, the results in Tables I and II underscore the computational viability of Kyber and Dilithium, demonstrating that hardware optimizations (e.g., AVX2) significantly enhance performance. These findings highlight the practicality of post-quantum cryptography (PQC) deployment, as even without specialized hardware accelerators, Kyber and Dilithium achieve efficient execution times while maintaining high security. + +TABLEI KEY AND CIPHERTEXT SIZES AND EXECUTION TIMES (IN MILLSECONDS) FOR ALL PARAMETER SETS OF KYBER. + +
KYBER 512
Sizes (Bytes)Reference (ms)AVX2 (ms)AVX2 Speedup Rate
sk: 1632gen: 0.035gen: 0.0075.00
pk: 800enc: 0.040enc: 0.0075.71
ct: 768dec: 0.052dec: 0.0086.50
Total0.1270.0225.77
KYBER 768
Sizes (Bytes)Reference (ms)AVX2 (ms)AVX2 Speedup Rate
sk: 2400gen: 0.058gen: 0.0115.27
pk: 1184enc: 0.063enc: 0.0115.73
ct: 1088dec: 0.080dec: 0.0126.67
Total0.2010.0345.91
KYBER 1024
Sizes (Bytes)Reference (ms)AVX2 (ms)AVX2 Speedup Rate
sk: 3168gen: 0.089gen: 0.0155.93
pk: 1568enc: 0.092enc: 0.0156.13
ct: 1568dec: 0.113dec: 0.0176.65
Total0.2940.0476.26
+ +TABLE II PUBLIC KEY AND SIGNATURE SIZES AND EXECUTION TIMES (IN MILLISECONDS) FOR ALL PARAMETER SETS OF DILITHIUM. + +
DILITHIUM 2
Sizes (Bytes)Reference (ms)AVX2 (ms)AVX2 Speedup Rate
pk: 1312gen: 0.094gen: 0.0263.62
sig: 2420sign: 0.445sign: 0.0775.78
verify: 0.104verify: 0.0283.71
Total0.6430.1314.91
DILITHIUM 3
Sizes (Bytes)Reference (ms)AVX2 (ms)AVX2 Speedup Rate
pk: 1952gen: 0.167gen: 0.0453.71
sig: 3293sign: 0.665sign: 0.1205.54
verify: 0.160verify: 0.0453.56
Total0.9920.2104.73
DILITHIUM 5
Sizes (Bytes)Reference (ms)AVX2 (ms)AVX2 Speedup Rate
pk: 2592gen: 0.253gen: 0.0703.61
sig: 4595sign: 0.840sign: 0.1445.83
verify: 0.267verify: 0.0713.76
Total1.3600.2854.77
+ +# IV. PERFORMANCE COMPARISON: POST-QUANTUM CRYPTOGRAPHY VS. CLASSICAL CRYPTOGRAPHY + +Table III presents a comparative analysis of execution times for post-quantum and classical cryptographic algorithms, evaluated under controlled conditions. Both PQC and classical schemes were tested at different security levels, measured in bits, to assess performance variations. The table includes cryptographic algorithms across multiple security configurations, ensuring a direct comparison of execution times. The evaluation focuses on total execution time, measured in milliseconds, to quantify computational cost across different cryptographic operations. While this analysis highlights execution speed, real-world deployment must also consider additional factors such as memory footprint, communication overhead, and hardware compatibility. + +Kyber exhibits notable performance advantages over both RSA and ECDH, which are commonly employed for key exchange. Kyber-512, offering 128-bit security, achieves execution times that are approximately three times faster than both RSA-2048 and ECDH(P-256), despite these classical schemes providing lower security guarantees. Even Kyber-1024, the most computationally expensive variant, maintains an execution time that is roughly three times faster than RSA-3072, which offers only 128-bit security. At equivalent security levels, Kyber consistently achieves faster execution times than ECDH(P-256, P-384, P-521) while also providing quantum resistance. These efficiency gains are attributed to Kyber's lattice-based cryptographic foundation, which relies on small polynomials and number-theoretic transforms (NTT) rather than large-number modular exponentiation. This mathematical structure enables faster key generation and encapsulation while maintaining strong security guarantees, particularly against quantum adversaries. + +Dilithium demonstrates significant computational advantages over ECDSA, a widely used classical digital signature scheme. At the 128-bit security level, Dilithium-2 executes signature operations approximately $20\%$ faster than ECDSA(P-256), with the performance gap increasing at higher security levels. Dilithium-5, the highest-security variant, achieves nearly twice the execution speed of ECDSA(P-512) at the 256-bit security level. A distinct characteristic of Dilithium is that signature generation dominates execution time, accounting for over $60\%$ of the total runtime, whereas ECDSA exhibits a more balanced distribution between signing and verification. This difference arises from Dilithium's structured lattice sampling, which, while computationally intensive, remains more efficient than ECDSA's elliptic curve discrete logarithm operations. Additionally, Dilithium's deterministic signature generation eliminates nonce-related vulnerabilities, a known weakness in ECDSA implementations. + +The results indicate that post-quantum cryptographic algorithms do not inherently introduce higher computational costs. On the contrary, Kyber and Dilithium frequently outperform classical cryptographic schemes at equivalent security levels. Kyber consistently demonstrates superior efficiency in key exchange operations compared to RSA and ECDH, even at its highest security configuration. Similarly, Dilithium provides a computationally efficient alternative to ECDSA, particularly as security levels increase. 
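For readers who want to exercise the classical baseline themselves, the ECDSA(P-256) operations compared here are available through any OpenSSL-backed library; the sketch below uses the Python `cryptography` package and is an illustration, not the benchmarking code behind Table III:

```python
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives import hashes

# The classical baseline from Table III: ECDSA over NIST P-256 (128-bit level).
private_key = ec.generate_private_key(ec.SECP256R1())
message = b"subscriber authentication token"

# Signing draws a fresh random nonce; reusing or biasing that nonce leaks the
# private key, which is the weakness Dilithium's deterministic signing avoids.
signature = private_key.sign(message, ec.ECDSA(hashes.SHA256()))

# Verification raises InvalidSignature on failure, otherwise returns None.
private_key.public_key().verify(signature, message, ec.ECDSA(hashes.SHA256()))
print("ECDSA(P-256) signature verified,", len(signature), "bytes (DER)")
```

Wrapping these two calls in a 1,000-iteration timing loop reproduces the methodology used for the classical rows of Table III.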
These performance findings confirm the computational feasibility of Kyber and Dilithium as post-quantum cryptographic solutions, demonstrating that quantum resistance does not necessarily come at the cost of execution efficiency. However, execution time is only one aspect of cryptographic feasibility. While our controlled benchmarking showed that Kyber and Dilithium outperform classical schemes in speed, these results were obtained under optimized and isolated conditions. Real-world deployment involves additional complexities, such as infrastructure constraints, interoperability with existing systems, and operational overhead, all of which can affect practical performance. Thus, while PQC shows strong computational efficiency, its large-scale adoption in telecom networks requires a broader evaluation covering scalability, integration challenges, and regulatory compliance. + +# V. POST-QUANTUM CRYPTOGRAPHY IN TELECOMMUNICATIONS: CHALLENGES, IMPLEMENTATIONS, AND FUTURE OUTLOOK + +Implementing PQC in telecommunications networks presents significant challenges. Telecom operators must upgrade complex, large-scale infrastructures that currently rely on classical encryption, all while maintaining service continuity. + +TABLE III EXECUTION TIME COMPARISON OF POST-QUANTUM AND CLASSICAL CRYPTOGRAPHIC ALGORITHMS.
| Algorithm | Security Level | Total Time (ms) |
| --- | --- | --- |
| Kyber-512 | 128-bit | 0.127 |
| Kyber-768 | 192-bit | 0.201 |
| Kyber-1024 | 256-bit | 0.294 |
| Dilithium-2 | 128-bit | 0.643 |
| Dilithium-3 | 192-bit | 0.992 |
| Dilithium-5 | 256-bit | 1.360 |
| ECDSA(P-256) | 128-bit | 0.801 |
| ECDSA(P-384) | 192-bit | 1.702 |
| ECDSA(P-521) | 256-bit | 2.398 |
| RSA-2048 | 112-bit | 0.324 |
| RSA-3072 | 128-bit | 0.884 |
| ECDH(P-256) | 128-bit | 0.102 |
| ECDH(P-384) | 192-bit | 0.299 |
| ECDH(P-521) | 256-bit | 0.903 |
+ +Key challenges include performance and latency impacts, compatibility with legacy systems, the lack of finalized standards, resource and cost constraints, transitional security risks, and vendor readiness. + +# A. Challenges + +1) Performance Impact on Existing Infrastructure: PQC algorithms require more computational resources and larger key sizes than classical cryptography. Many schemes are at least an order of magnitude slower, or produce substantially larger keys and ciphertexts, than RSA or ECC, straining network devices [5]. The increased size of PQC keys, signatures, and ciphertexts taxes bandwidth and memory-constrained hardware; for instance, an additional 1 KB in a TLS handshake can increase response time by $1.5\%$ [6]. Latency-sensitive telecom applications, such as voice and video, may experience performance degradation due to longer cryptographic operations or larger handshake messages. Operators need to evaluate whether servers, routers, and hardware security modules (HSMs) can support the increased computational load of PQC, as many may require hardware upgrades specifically for PQC adoption. Especially in radio access networks (RANs) and customer devices with limited processing power, PQC's computational overhead and memory footprint pose a significant deployment challenge. + +2) Interoperability with Legacy Systems: During the transition, not all network elements and partner systems will upgrade to PQC at the same time, raising interoperability issues. If one system uses a PQC-based protocol but its communicating peer does not, a secure connection cannot be established [7]. Many telecom protocols follow a "fail secure" approach, meaning a PQC-enabled node could be cut off from legacy nodes that do not recognize the new algorithms. Because telecom networks are tightly interconnected, a single non-upgraded component can block migration and create deployment bottlenecks. A possible solution is hybrid cryptographic modes (combining classical and PQC algorithms), but this adds complexity and requires new protocol standards and careful validation, potentially slowing the transition. To prevent network partitioning, telecom operators must ensure that PQC upgrades happen in sync across critical systems or remain backward-compatible. + +3) Standardization and Regulatory Concerns: The telecom industry is highly standardized and regulated, so PQC adoption hinges on mature standards and regulatory guidance. As of 2024, standards bodies like NIST are only now publishing the first official PQC algorithm standards [8]. Until international standards bodies (e.g., 3GPP, IETF, ETSI) incorporate PQC, telcos risk adopting interim solutions that may not remain interoperable or compliant in the long term. There is also regulatory pressure: governments and industry bodies are already setting timelines and mandates for quantum-safe transitions. For example, the U.S. National Security Agency's CNSA 2.0 mandates specific PQC algorithms, with the goal of making all national security systems quantum-resistant by 2035. However, inconsistent national strategies pose challenges for global carriers, as many countries have only issued high-level guidance to "start planning," with few concrete standards yet. The absence of finalized telecom-specific PQC standards adds uncertainty, requiring operators to coordinate closely with standards organizations to ensure that protocols such as 5G authentication, IPsec, and TLS integrate PQC effectively.
+ +4) Cost and Resource Allocation: Upgrading a telecom operator's cryptographic infrastructure to PQC is costly and resource-intensive. Many legacy systems lack the processing power, memory, or bandwidth to support PQC, requiring replacement or retrofitting of equipment such as mobile devices, SIM cards, routers, and base stations. This represents a significant capital expense, with costs extending to PQC-capable HSMs, accelerator cards, software updates, staff training, testing, and parallel system operation during the transition. Smaller operators worry that only large carriers can afford early adoption, but as vendors integrate PQC into their products, upgrade costs are expected to fall. Nonetheless, operators need to allocate substantial resources for cryptographic inventory, upgrade planning, and continuous maintenance to ensure a smooth migration. The cost of inaction could be higher: a quantum-broken network may result in regulatory penalties and customer loss, making early investment crucial. + +5) Security Risks and Transition Challenges: Transitioning to PQC raises security concerns because, unlike RSA and ECC, the new algorithms have not been hardened by decades of real-world deployment. There are risks of undiscovered weaknesses or implementation flaws, and some PQC candidates were found vulnerable to cryptanalysis and side-channel attacks during standardization. Ensuring side-channel resistance is critical: cryptographic operations must not leak secrets through timing, power, or memory-access patterns. Additionally, PQC introduces complex key management and new failure modes; for example, some digital signature schemes require tracking one-time keys, complicating network authentication. Early deployments have exposed practical issues, such as network middleware and firewalls failing on unusually large key-exchange messages. Misconfigurations, such as hybrid-mode errors or certificate-management lapses, could also introduce vulnerabilities. To mitigate these risks, telecom operators must conduct extensive testing, use proven implementations, and ensure crypto-agility so that algorithms can be updated when needed. + +6) Vendor Readiness and Supply Chain Considerations: Telecommunications relies on a vast network of vendors for hardware, software, and infrastructure, making PQC adoption a supply-chain challenge. Many vendors are waiting for finalized standards before integrating PQC, and without support in critical components such as SIM cards and routers, full migration is impossible. To address this, telecom operators are updating procurement policies, requiring vendors to support NIST-approved PQC algorithms and crypto-agility. Regulatory bodies may also mandate certification, potentially delaying availability. While some vendors are developing PQC-capable products, widespread readiness will take time. Effective supply-chain management and early engagement with suppliers are essential for smooth deployment, coordinated upgrades, and interoperability. Ultimately, achieving a quantum-safe telecom network requires industry-wide collaboration and careful planning. + +# B. Successful Implementations and Initiatives of PQC + +Despite the challenges, there have been several successful implementations and trials of post-quantum cryptography in telecom contexts. Forward-thinking carriers and technology partners around the world have started to integrate PQC into test networks, demonstrating feasibility and gleaning best practices.
Below are a few notable examples and case studies highlighting how PQC deployment is being approached in telecommunications: + +1) SoftBank (Japan) – Hybrid PQC Network Trial: SoftBank Corp., a major mobile operator in Japan, partnered with SandboxAQ to test PQC algorithms in a live network environment. In 2023, they conducted a hybrid encryption trial, combining classical elliptic-curve cryptography with lattice-based post-quantum algorithms on live network traffic [9]. The results were encouraging: the hybrid quantum-safe approach was verified to work on existing 4G/5G infrastructure with minimal performance impact. SoftBank reported that lattice-based PQC algorithms (such as those later standardized by NIST) outperformed other quantum-safe alternatives in their tests, providing strong security with only marginal added latency [10]. By adopting a hybrid approach, SoftBank ensured interoperability with existing systems while enhancing security. Their phased deployment, from lab tests to real-world networks, demonstrated that careful algorithm selection and optimization can mitigate future quantum threats without major performance trade-offs. Collaboration with SandboxAQ helped streamline cryptographic inventory and regulatory compliance. SoftBank continues to invest in PQC, positioning early adoption as a competitive advantage in secure telecom infrastructure. + +2) SK Telecom (South Korea) – PQC in a 5G Standalone Network: Another pioneering effort was led by SK Telecom (SKT) in South Korea, in collaboration with Thales. SKT and Thales carried out a groundbreaking test of post-quantum cryptography in a real 5G standalone network environment [11]. In this pilot, SKT deployed quantum-resistant encryption to secure subscriber identities and network traffic. They tested 5G USIM cards implementing the CRYSTALS-Kyber key encapsulation algorithm, ensuring that authentication remains secure against quantum threats. The trial demonstrated seamless interoperability between PQC-protected SIMs and the core network, with encrypted calls proving out quantum-safe communication. This deployment, one of the first PQC integrations in 5G, underscores the role of carrier-vendor partnerships and informs ongoing standards development. + +3) North American and European Carriers and Initiatives: U.S. and Canadian telecom operators are preparing for PQC, driven by government directives. AT&T plans to be "quantum ready" by 2025, with internal pilots testing PQC in VPNs and TLS. While large-scale deployments are pending, mandates for critical infrastructure are pushing adoption. Other providers, including Verizon, Rogers, Deutsche Telekom, and BT, are engaged in research and industry collaborations, focusing on PQC for routing, customer data protection, and inter-carrier security. These efforts emphasize crypto-agility, ensuring networks can transition flexibly as standards evolve. Industry groups, including the GSMA Post-Quantum Telco Network Taskforce and 5G Americas, are developing best practices to guide telecom operators through PQC adoption. + +Across these implementations, key best practices have emerged. Conducting a cryptographic inventory helps identify necessary upgrades, while pilot deployments in less constrained environments allow for manageable PQC integration. A hybrid approach, running PQC alongside classical encryption, has been widely adopted to maintain continuity (a minimal sketch of such a hybrid exchange follows below). Vendor collaboration with SIM card providers, router manufacturers, and software vendors is crucial for early integration.
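The hybrid pattern used in trials like SoftBank's can be made concrete in a few lines. The sketch below derives one session key from both an ephemeral ECDH secret and a Kyber shared secret, so the session remains protected unless both primitives are broken. It is a minimal sketch assuming the liboqs-python bindings (`oqs`) and the Python `cryptography` package; the HKDF label and key length are illustrative choices, not a standardized hybrid scheme or the operators' actual protocol.

```python
# Hybrid key exchange sketch: combine a classical ECDH secret with a Kyber
# shared secret via a KDF, so security holds if EITHER primitive survives.
import oqs
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives.kdf.hkdf import HKDF

# Classical part: ephemeral ECDH over P-256.
client_ec = ec.generate_private_key(ec.SECP256R1())
server_ec = ec.generate_private_key(ec.SECP256R1())
ecdh_secret = client_ec.exchange(ec.ECDH(), server_ec.public_key())

# Post-quantum part: Kyber-768 encapsulation against the server's KEM key.
# (A real client would run its own KeyEncapsulation instance; encap_secret
# needs only the public key, so one object suffices for this sketch.)
with oqs.KeyEncapsulation("Kyber768") as server_kem:
    kem_pk = server_kem.generate_keypair()
    ciphertext, pq_secret = server_kem.encap_secret(kem_pk)
    assert server_kem.decap_secret(ciphertext) == pq_secret

# Concatenate both secrets and derive the session key; recovering it now
# requires breaking ECDH AND Kyber.
session_key = HKDF(
    algorithm=hashes.SHA256(),
    length=32,
    salt=None,
    info=b"hybrid-ecdh-kyber-demo",  # illustrative context label
).derive(ecdh_secret + pq_secret)
print("256-bit hybrid session key:", session_key.hex())
```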
These trials also confirm that early PQC deployment safeguards critical data from future quantum threats. While performance impacts are generally manageable with optimized algorithms, some operators have even improved network efficiency by modernizing legacy systems along the way. Overall, these case studies demonstrate that with careful planning and phased execution, telcos can begin introducing quantum-resistant cryptography into their networks today, gaining experience and confidence for broader rollouts. + +# C. Future Outlook and Recommendations + +The adoption of PQC in telecom networks is shifting from isolated pilots to broader deployments as standards solidify and the quantum threat draws closer. Telecom operators must act now, as waiting until large-scale quantum computers emerge will be too late. Security organizations stress the urgency of conducting cryptographic inventories to identify where public-key cryptography is used (SIM authentication, SSL/TLS links, PKI certificates) and to prioritize critical assets against "harvest now, decrypt later" risks; a minimal certificate-triage sketch closes this subsection. Awareness and education are also crucial for leadership and technical teams. + +A structured PQC implementation roadmap involves phased deployments, starting with hybrid cryptographic modes alongside classical encryption to maintain compatibility. Initial transitions should focus on non-customer-facing segments, expanding as standards mature and interoperability improves. Operators must align migration plans with regulatory requirements, ensuring compliance with evolving mandates. From 2024 onward, telecom providers are expected to integrate PQC into technology refresh cycles, with PQC becoming standard in 5G-Advanced and 6G networks by the late 2020s. The goal is to achieve full quantum resistance in critical infrastructure by the early 2030s. + +Within the next decade, PQC will likely be as integral to telecom security as TLS and IPsec are today. With NIST and other bodies finalizing standards in 2024–2025, adoption will accelerate, giving early adopters a competitive edge with "quantum-safe" services. Given the uncertainty of quantum computing advancements, proactive preparation is essential. Encouragingly, PQC adoption does not necessarily require hardware replacement; many transitions can be accomplished through software updates, reducing costs. As vendors integrate PQC into products, expenses are expected to decrease further. + +By 2030, much of global telecom traffic, particularly sensitive communications, will likely be encrypted using post-quantum or hybrid cryptographic schemes. Collaboration among telecom operators, governments, and the security community will be crucial for interoperability and resilience. With proactive planning and cooperative execution, the telecom industry can secure global communications against quantum threats while maintaining security, efficiency, and compliance.
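To make the cryptographic-inventory recommendation above concrete, here is a minimal triage sketch: it loads one X.509 certificate and flags quantum-vulnerable public-key types. It assumes the Python `cryptography` package; the file path is a placeholder, and a real inventory would walk certificate stores, TLS endpoints, and HSM key inventories rather than a single file.

```python
# Minimal "cryptographic inventory" triage: classify a certificate's public
# key as quantum-vulnerable (RSA/ECC fall to Shor's algorithm) or not.
from cryptography import x509
from cryptography.hazmat.primitives.asymmetric import ec, rsa

def classify(pem_bytes: bytes) -> str:
    cert = x509.load_pem_x509_certificate(pem_bytes)
    key = cert.public_key()
    if isinstance(key, rsa.RSAPublicKey):
        return f"RSA-{key.key_size}: quantum-vulnerable (Shor)"
    if isinstance(key, ec.EllipticCurvePublicKey):
        return f"ECC {key.curve.name}: quantum-vulnerable (Shor)"
    return f"{type(key).__name__}: review manually"

with open("server.pem", "rb") as f:  # placeholder path
    print(classify(f.read()))
```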
+ +# VI. CONCLUSION + +The transition to post-quantum cryptography (PQC) is no longer a theoretical consideration but an imminent necessity for securing digital communications against future quantum threats. This study has demonstrated that CRYSTALS-Kyber and CRYSTALS-Dilithium, the NIST-standardized PQC algorithms, not only provide robust quantum resistance but also achieve competitive execution times compared to classical cryptographic schemes. Benchmarking results highlight their computational efficiency, particularly when optimized with AVX2 vectorization. + +However, large-scale deployment in telecommunications networks introduces critical challenges, including infrastructure upgrades, interoperability concerns, regulatory compliance, and cost constraints. The successful implementation of PQC in telecom environments requires a structured, phased migration strategy, leveraging hybrid cryptographic approaches to maintain compatibility with legacy systems. Early industry trials demonstrate the viability of PQC adoption while emphasizing the importance of vendor collaboration, cryptographic agility, and thorough performance validation. + +Looking ahead, PQC is expected to become a fundamental component of telecom security, with adoption accelerating as standards solidify and regulatory mandates take effect. As quantum computing advances remain unpredictable, proactive preparation is essential to mitigate the risks of delayed migration. Encouragingly, the ongoing integration of PQC into security protocols for 5G and 6G networks, along with continued industry cooperation, ensures that telecom infrastructure remains resilient against emerging cryptographic threats. With careful planning and strategic execution, the transition to quantum-safe cryptography can safeguard telecom networks, ensuring their security and adaptability in the quantum era. + +# REFERENCES + +[1] D. Joseph, R. Misoczki, M. Manzano, J. Tricot, F. D. Pinuaga, O. Lacombe, S. Leichenauer, J. Hiday, P. Venables, and R. Hansen, "Transitioning organizations to post-quantum cryptography," Nature, vol. 605, no. 7909, pp. 237–243, 2022. +[2] D. J. Bernstein and T. Lange, "Post-quantum cryptography," Nature, vol. 549, no. 7671, pp. 188–194, 2017. +[3] G. Alagic, J. Alperin-Sheriff, D. Apon, D. Cooper, Q. Dang, Y.-K. Liu, C. Miller, D. Moody, R. Peralta et al., "Status report on the first round of the NIST post-quantum cryptography standardization process," 2019. +[4] National Institute of Standards and Technology, "Post-Quantum Cryptography Standardization," 2024, accessed: 2024-03-17. [Online]. Available: https://csrc.nist.gov/projects/post-quantum-cryptography/selected-algorithm +[5] GSM Association, "Post Quantum Cryptography - Guidelines for Telecom Use Cases," GSM Association, Technical Report PQ.03, February 2024, accessed: 2024-03-17. [Online]. Available: https://www.gsma.com/newsroom/wp-content/uploads/PQ.03-Post-Quantum-Cryptography-Guidelines-for-Telecom-Use-v1.0.pdf. +[6] PKI Consortium, "Key takeaways of the PQC conference in Austin," January 30, 2025, accessed: 2025-03-17. [Online]. Available: https://pkic.org/2025/01/30/key-takeaways-of-the-pqc-conference-in-austin/. +[7] U.S. Government, "Report on post-quantum cryptography," Government Report, The White House, Washington, D.C., Tech. Rep. REF PQC-Report FINAL Send, July 2024, presented to the Senate Committee on Homeland Security and Governmental Affairs and the House Committee on Oversight and Accountability. [Online]. Available: https://bidenwhitehouse.archives.gov/wp-content/uploads/2024/07/REF_PQC-Report_FINAL_Send.pdf +[8] J. Taaffe, "Are telcos ready for a quantum leap?" June 2023, accessed: March 17, 2025. [Online]. Available: https://inform.tmforum.org/features-and-opinion/are-tercos-making-a-quantum-leap. +[9] SoftBank Corp. and SandboxAQ, "SoftBank Corp. and SandboxAQ to Jointly Implement Next-Generation Cryptosystem Resilient to Cyber Attacks from Quantum Computers," March 2022, press release, accessed: March 17, 2025. [Online].
Available: https://www.sandboxaq.com/press/softbank-corp-and-sandbox-aq-to-jointly-implement next-generation-cryptosystem-resilient-to-cyber-attacks-from-quantum-computers. +[10] SoftBank Corp., "SoftBank Corp. and SandboxAQ Jointly Verify Hybrid Mode Quantum-safe Technology," February 2023, blog Post, accessed: March 17, 2025. [Online]. Available: https://www.softbank.jp/en/corp/technology/research/story-event/008/. +[11] Thales Group and SK Telecom, "Thales and SK Telecom: Pioneering Quantum-Resistant Cryptography for 5G Networks," 2024, accessed: March 17, 2025. [Online]. Available: https://www.thalesgroup.com/en/markets/digital-identity-and-security/mobile/5G-skt-post-quantum-user-case. \ No newline at end of file diff --git a/data/2025/2503_12xxx/2503.12952/images/9c0eacd157e54f2d5f632c6c7f77df6b42ee84b9d3b991b5c9429c568a4961ff.jpg b/data/2025/2503_12xxx/2503.12952/images/9c0eacd157e54f2d5f632c6c7f77df6b42ee84b9d3b991b5c9429c568a4961ff.jpg new file mode 100644 index 0000000000000000000000000000000000000000..87c9e1c1238688bebd04df5c41ac3236409edc1a --- /dev/null +++ b/data/2025/2503_12xxx/2503.12952/images/9c0eacd157e54f2d5f632c6c7f77df6b42ee84b9d3b991b5c9429c568a4961ff.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e3ba7c5d51e7cf29a56aaa4f2bfdf6090119fa132bea47776467264bd0dfb188 +size 87866 diff --git a/data/2025/2503_12xxx/2503.12952/images/bd69976a57eb5e31707407c28643d178dd46223bf5a751e795ecb0e0d3d78495.jpg b/data/2025/2503_12xxx/2503.12952/images/bd69976a57eb5e31707407c28643d178dd46223bf5a751e795ecb0e0d3d78495.jpg new file mode 100644 index 0000000000000000000000000000000000000000..94c52907b09e0065748ee42dfd86fde5831e2b93 --- /dev/null +++ b/data/2025/2503_12xxx/2503.12952/images/bd69976a57eb5e31707407c28643d178dd46223bf5a751e795ecb0e0d3d78495.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:78e1f05fc7daaf5836dc30367671a2afa569db24e9579d48b2d589417942ff54 +size 59525 diff --git a/data/2025/2503_12xxx/2503.12952/images/cd5e919056af76267e409d8fb1057479a9f46d5fb8d12f6435f7d32ac2f38b8c.jpg b/data/2025/2503_12xxx/2503.12952/images/cd5e919056af76267e409d8fb1057479a9f46d5fb8d12f6435f7d32ac2f38b8c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4be2bf483031a1c14dda1cb5fa5cf45478d1176e --- /dev/null +++ b/data/2025/2503_12xxx/2503.12952/images/cd5e919056af76267e409d8fb1057479a9f46d5fb8d12f6435f7d32ac2f38b8c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1e59ab1d1c63985c1c513de4b464b7b687ba73e68ad68988ac6d2d494f9c7cc4 +size 83166 diff --git a/data/2025/2503_12xxx/2503.12952/layout.json b/data/2025/2503_12xxx/2503.12952/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..0f4faf71f08e028d1f2eab083b111f031026cf54 --- /dev/null +++ b/data/2025/2503_12xxx/2503.12952/layout.json @@ -0,0 +1,3297 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 54, + 50, + 558, + 104 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 50, + 558, + 104 + ], + "spans": [ + { + "bbox": [ + 54, + 50, + 558, + 104 + ], + "type": "text", + "content": "Performance Analysis and Industry Deployment of Post-Quantum Cryptography Algorithms" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 121, + 185, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 121, + 185, + 133 + ], + "spans": [ + { + "bbox": [ + 105, + 121, + 185, + 133 + ], + "type": "text", + "content": "Elif Dicle Demir" + } + ] + } + ], + "index": 2 + 
}, + { + "bbox": [ + 67, + 133, + 222, + 145 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 133, + 222, + 145 + ], + "spans": [ + { + "bbox": [ + 67, + 133, + 222, + 145 + ], + "type": "text", + "content": "Electrical and Electronics Eng. Dept." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 113, + 147, + 176, + 157 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 147, + 176, + 157 + ], + "spans": [ + { + "bbox": [ + 113, + 147, + 176, + 157 + ], + "type": "text", + "content": "Koç University" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 159, + 179, + 170 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 159, + 179, + 170 + ], + "spans": [ + { + "bbox": [ + 107, + 159, + 179, + 170 + ], + "type": "text", + "content": "Istanbul, Türkiye" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 96, + 171, + 190, + 181 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 171, + 190, + 181 + ], + "spans": [ + { + "bbox": [ + 96, + 171, + 190, + 181 + ], + "type": "text", + "content": "elifdemir21@ku.edu.tr" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 277, + 121, + 333, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 277, + 121, + 333, + 133 + ], + "spans": [ + { + "bbox": [ + 277, + 121, + 333, + 133 + ], + "type": "text", + "content": "Buse Bilgin" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 247, + 134, + 364, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 247, + 134, + 364, + 144 + ], + "spans": [ + { + "bbox": [ + 247, + 134, + 364, + 144 + ], + "type": "text", + "content": "6GEN Lab., Next-Gen R&D" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 240, + 146, + 370, + 157 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 240, + 146, + 370, + 157 + ], + "spans": [ + { + "bbox": [ + 240, + 146, + 370, + 157 + ], + "type": "text", + "content": "Network Technologies, Turkcell" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 268, + 159, + 339, + 170 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 268, + 159, + 339, + 170 + ], + "spans": [ + { + "bbox": [ + 268, + 159, + 339, + 170 + ], + "type": "text", + "content": "Istanbul, Türkiye" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 246, + 171, + 361, + 182 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 246, + 171, + 361, + 182 + ], + "spans": [ + { + "bbox": [ + 246, + 171, + 361, + 182 + ], + "type": "text", + "content": "buse.bilgin@turkcell.com.tr" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 410, + 121, + 522, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 410, + 121, + 522, + 133 + ], + "spans": [ + { + "bbox": [ + 410, + 121, + 522, + 133 + ], + "type": "text", + "content": "Mehmet Cengiz Onbaşi" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 388, + 133, + 542, + 145 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 388, + 133, + 542, + 145 + ], + "spans": [ + { + "bbox": [ + 388, + 133, + 542, + 145 + ], + "type": "text", + "content": "Electrical and Electronics Eng. Dept." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 434, + 147, + 497, + 158 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 434, + 147, + 497, + 158 + ], + "spans": [ + { + "bbox": [ + 434, + 147, + 497, + 158 + ], + "type": "text", + "content": "Koç University" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 429, + 159, + 499, + 170 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 429, + 159, + 499, + 170 + ], + "spans": [ + { + "bbox": [ + 429, + 159, + 499, + 170 + ], + "type": "text", + "content": "Istanbul, Türkiye" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 422, + 171, + 506, + 181 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 422, + 171, + 506, + 181 + ], + "spans": [ + { + "bbox": [ + 422, + 171, + 506, + 181 + ], + "type": "text", + "content": "monbasli@ku.edu.tr" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 45, + 217, + 301, + 486 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 217, + 301, + 486 + ], + "spans": [ + { + "bbox": [ + 45, + 217, + 301, + 486 + ], + "type": "text", + "content": "Abstract—As quantum computing advances, modern cryptographic standards face an existential threat, necessitating a transition to post-quantum cryptography (PQC). The National Institute of Standards and Technology (NIST) has selected CRYSTALS-Kyber and CRYSTALS-Dilithium as standardized PQC algorithms for secure key exchange and digital signatures, respectively. This study conducts a comprehensive performance analysis of these algorithms by benchmarking execution times across cryptographic operations such as key generation, encapsulation, decapsulation, signing, and verification. Additionally, the impact of AVX2 optimizations is evaluated to assess hardware acceleration benefits. Our findings demonstrate that Kyber and Dilithium achieve efficient execution times, outperforming classical cryptographic schemes such as RSA and ECDSA at equivalent security levels. Beyond technical performance, the real-world deployment of PQC introduces challenges in telecommunications networks, where large-scale infrastructure upgrades, interoperability with legacy systems, and regulatory constraints must be addressed. This paper examines the feasibility of PQC adoption in telecom environments, highlighting key transition challenges, security risks, and implementation strategies. Through industry case studies, we illustrate how telecom operators are integrating PQC into 5G authentication, subscriber identity protection, and secure communications. Our analysis provides insights into the computational trade-offs, deployment considerations, and standardization efforts shaping the future of quantum-safe cryptographic infrastructure." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 45, + 487, + 301, + 527 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 487, + 301, + 527 + ], + "spans": [ + { + "bbox": [ + 45, + 487, + 301, + 527 + ], + "type": "text", + "content": "Index Terms—Post-Quantum Cryptography, CRYSTALS-Kyber, CRYSTALS-Dilithium, NIST Standardization, Telecommunications Security, Cryptographic Deployment, Quantum-Safe Networks." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 132, + 536, + 214, + 548 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 536, + 214, + 548 + ], + "spans": [ + { + "bbox": [ + 132, + 536, + 214, + 548 + ], + "type": "text", + "content": "I. 
INTRODUCTION" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 45, + 552, + 301, + 719 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 552, + 301, + 719 + ], + "spans": [ + { + "bbox": [ + 45, + 552, + 301, + 719 + ], + "type": "text", + "content": "Modern cryptographic systems rely on the computational intractability of certain mathematical problems, such as integer factorization and discrete logarithms, to ensure the security of digital communication and data protection [1]. The advent of quantum computing poses a fundamental threat to modern cryptographic systems, as algorithms such as Shor's and Grover's exploit quantum parallelism to break widely used cryptographic primitives. Shor's algorithm efficiently factors large integers and solves the discrete logarithm problem, undermining the security of RSA and Elliptic Curve Cryptography(ECC), while Grover's algorithm accelerates brute-force attacks, significantly reducing the effective security of symmetric encryption schemes [2]. As research continues to refine quantum hardware, the urgency to transition towards" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 217, + 564, + 240 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 217, + 564, + 240 + ], + "spans": [ + { + "bbox": [ + 308, + 217, + 564, + 240 + ], + "type": "text", + "content": "quantum-resistant cryptographic solutions has become a pressing concern." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 240, + 564, + 467 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 240, + 564, + 467 + ], + "spans": [ + { + "bbox": [ + 307, + 240, + 564, + 467 + ], + "type": "text", + "content": "To address these emerging threats, the National Institute of Standards and Technology (NIST) initiated the Post-Quantum Cryptography (PQC) Standardization process to develop cryptographic algorithms resilient to quantum threats. The evaluation criteria for candidate algorithms include security against both classical and quantum attacks, cost and performance efficiency, and implementation characteristics such as flexibility and resistance to side-channel attacks [3]. As a result of the NIST standardization process, CRYSTALS-Kyber and HQC were selected as key encapsulation mechanisms (KEMs), while CRYSTALS-Dilithium, Falcon, and SPHINCS+ were chosen as digital signature schemes due to their strong security foundations, computational efficiency, and real-world applicability. Kyber is a lattice-based KEM, while HQC is a code-based KEM, both ensuring secure key exchange over insecure communication channels. Similarly, Dilithium and Falcon are lattice-based digital signature schemes designed for message authenticity and integrity, whereas SPHINCS+ is a hash-based scheme. [4]" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 468, + 564, + 612 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 468, + 564, + 612 + ], + "spans": [ + { + "bbox": [ + 307, + 468, + 564, + 612 + ], + "type": "text", + "content": "This study focuses on the performance evaluation of postquantum cryptographic algorithms, specifically Kyber and Dilithium, by benchmarking their execution times across key cryptographic operations. Given the critical role of computational efficiency in the real-world adoption of PQC, our analysis provides insights into their feasibility for practical deployment. 
Additionally, as the transition to quantum-safe cryptography involves not only technical performance but also industry-wide adoption challenges, we extend our study to include an industry perspective, assessing the implications of PQC deployment in telecommunications and broader enterprise environments." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 312, + 619, + 560, + 643 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 619, + 560, + 643 + ], + "spans": [ + { + "bbox": [ + 312, + 619, + 560, + 643 + ], + "type": "text", + "content": "II. TESTING METHODOLOGY AND ENVIRONMENT SETUP FOR PERFORMANCE ANALYSIS" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 647, + 564, + 719 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 647, + 564, + 719 + ], + "spans": [ + { + "bbox": [ + 307, + 647, + 564, + 719 + ], + "type": "text", + "content": "To understand their computational feasibility, we conducted a detailed performance analysis of Kyber and Dilithium under controlled benchmarking conditions. The performance of cryptographic algorithms is a critical factor in their real-world adoption, particularly in PQC, where computational efficiency directly impacts practical deployment in constrained" + } + ] + } + ], + "index": 25 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 186, + 35, + 540 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 186, + 35, + 540 + ], + "spans": [ + { + "bbox": [ + 14, + 186, + 35, + 540 + ], + "type": "text", + "content": "arXiv:2503.12952v2 [cs.CR] 31 Mar 2025" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 45, + 49, + 301, + 193 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 49, + 301, + 193 + ], + "spans": [ + { + "bbox": [ + 45, + 49, + 301, + 193 + ], + "type": "text", + "content": "environments. This section presents a benchmarking study of Kyber and Dilithium, evaluating their execution time across key operations such as key generation, encapsulation, decapsulation, signing, and verification. Additionally, optimizations leveraging AVX2 vector instructions are examined to assess the impact of hardware acceleration on performance. Furthermore, we compare these PQC algorithms with widely used classical cryptographic schemes—Elliptic Curve Diffie-Hellman (ECDH), Elliptic Curve Digital Signature Algorithm (ECDSA), and RSA—to analyze the trade-offs in execution time and efficiency when transitioning to quantum-resistant cryptography." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 45, + 193, + 301, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 193, + 301, + 312 + ], + "spans": [ + { + "bbox": [ + 45, + 193, + 301, + 312 + ], + "type": "text", + "content": "Each cryptographic operation was executed 1,000 times to ensure consistency, with median and average execution times recorded. The benchmarking methodology follows standard cryptographic evaluation practices, converting measured CPU cycles to execution time using a fixed " + }, + { + "bbox": [ + 45, + 193, + 301, + 312 + ], + "type": "inline_equation", + "content": "3.3\\mathrm{GHz}" + }, + { + "bbox": [ + 45, + 193, + 301, + 312 + ], + "type": "text", + "content": " clock. We evaluated both reference and AVX2-optimized implementations of Kyber and Dilithium to assess the performance gains from vectorized instructions. 
Additionally, to compare PQC with classical cryptography, we tested ECDH, ECDSA, and RSA under the same conditions using OpenSSL libraries." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 66, + 320, + 281, + 343 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 320, + 281, + 343 + ], + "spans": [ + { + "bbox": [ + 66, + 320, + 281, + 343 + ], + "type": "text", + "content": "III. PERFORMANCE EVALUATION OF KYBER AND DILITHIUM" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 45, + 348, + 302, + 623 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 348, + 302, + 623 + ], + "spans": [ + { + "bbox": [ + 45, + 348, + 302, + 623 + ], + "type": "text", + "content": "Table I presents performance metrics for Kyber, a key encapsulation mechanism (KEM). It includes the secret key (sk), public key (pk), and ciphertext (ct) sizes for different security levels, reflecting storage and transmission overhead. The listed cryptographic operations are key generation (gen), responsible for producing the key pair; encapsulation (enc), encrypting a shared secret using the recipient's public key; and decapsulation (dec), recovering the shared secret with the private key. Table II provides results for Dilithium, a digital signature scheme. It reports public key (pk) and signature (sig) sizes, which indicate storage costs for authentication. The benchmarked operations include key generation (gen), used to create the signing key pair; signing (sign), which generates digital signatures for message integrity; and verification (verify), ensuring the validity of signatures. The AVX2 speedup rate in Tables I and II represents the performance improvement of the AVX2-optimized implementation compared to the reference implementation. It is calculated as the ratio of execution times, indicating how many times faster the AVX2 implementation performs a given cryptographic operation. A higher speedup value signifies greater efficiency gains achieved through vectorized polynomial arithmetic in AVX2-enabled processors." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 623, + 301, + 719 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 623, + 301, + 719 + ], + "spans": [ + { + "bbox": [ + 45, + 623, + 301, + 719 + ], + "type": "text", + "content": "As indicated in Table I, the execution times of Kyber increase with higher security levels across all three operations: key generation, encapsulation, and decapsulation. Notably, Kyber-512 completes execution in " + }, + { + "bbox": [ + 45, + 623, + 301, + 719 + ], + "type": "inline_equation", + "content": "0.127\\mathrm{ms}" + }, + { + "bbox": [ + 45, + 623, + 301, + 719 + ], + "type": "text", + "content": ", whereas Kyber-1024 requires " + }, + { + "bbox": [ + 45, + 623, + 301, + 719 + ], + "type": "inline_equation", + "content": "0.294\\mathrm{ms}" + }, + { + "bbox": [ + 45, + 623, + 301, + 719 + ], + "type": "text", + "content": ", demonstrating the expected computational cost of increased cryptographic strength. However, the scaling is nonlinear, as the increase from Kyber-768 to Kyber-1024 is smaller than from Kyber-512 to Kyber-768." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 307, + 49, + 564, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 49, + 564, + 133 + ], + "spans": [ + { + "bbox": [ + 307, + 49, + 564, + 133 + ], + "type": "text", + "content": "The AVX2 optimization significantly reduces execution time, yielding an average speedup of " + }, + { + "bbox": [ + 307, + 49, + 564, + 133 + ], + "type": "inline_equation", + "content": "5.98 \\times" + }, + { + "bbox": [ + 307, + 49, + 564, + 133 + ], + "type": "text", + "content": " across different security levels. The most substantial gains occur in decapsulation, which is reduced by up to " + }, + { + "bbox": [ + 307, + 49, + 564, + 133 + ], + "type": "inline_equation", + "content": "6.65 \\times" + }, + { + "bbox": [ + 307, + 49, + 564, + 133 + ], + "type": "text", + "content": " due to the vectorized polynomial arithmetic enabled by AVX2 instructions. This demonstrates that Kyber benefits greatly from parallelization, making it well-suited for optimized hardware implementations." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 308, + 134, + 564, + 242 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 134, + 564, + 242 + ], + "spans": [ + { + "bbox": [ + 308, + 134, + 564, + 242 + ], + "type": "text", + "content": "Similarly, as shown in Table II, the execution time of Dilithium scales with security levels, with Dilithium-2 executing in 0.643 ms while Dilithium-5 requires 1.36 ms. Unlike Kyber, where operations are relatively balanced, Dilithium's signing step dominates execution time—accounting for over " + }, + { + "bbox": [ + 308, + 134, + 564, + 242 + ], + "type": "inline_equation", + "content": "60\\%" + }, + { + "bbox": [ + 308, + 134, + 564, + 242 + ], + "type": "text", + "content": " of the total runtime in all security levels. This is due to the structured lattice sampling required for signature generation, which is inherently more computationally expensive than verification." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 307, + 243, + 564, + 352 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 243, + 564, + 352 + ], + "spans": [ + { + "bbox": [ + 307, + 243, + 564, + 352 + ], + "type": "text", + "content": "The AVX2 speedup for Dilithium is lower than for Kyber " + }, + { + "bbox": [ + 307, + 243, + 564, + 352 + ], + "type": "inline_equation", + "content": "(4.8\\times" + }, + { + "bbox": [ + 307, + 243, + 564, + 352 + ], + "type": "text", + "content": " on average), but still significant, particularly in the signing operation, which achieves up to a " + }, + { + "bbox": [ + 307, + 243, + 564, + 352 + ], + "type": "inline_equation", + "content": "5.83\\times" + }, + { + "bbox": [ + 307, + 243, + 564, + 352 + ], + "type": "text", + "content": " reduction in execution time. The verification step sees the smallest speedup " + }, + { + "bbox": [ + 307, + 243, + 564, + 352 + ], + "type": "inline_equation", + "content": "(3.76\\times)" + }, + { + "bbox": [ + 307, + 243, + 564, + 352 + ], + "type": "text", + "content": ", reflecting its already efficient nature. The results emphasize that while Dilithium is computationally heavier than Kyber, its AVX2-optimized variant brings notable efficiency improvements, making it feasible for real-world applications." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 307, + 354, + 565, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 354, + 565, + 449 + ], + "spans": [ + { + "bbox": [ + 307, + 354, + 565, + 449 + ], + "type": "text", + "content": "Overall, the results in Tables I and II underscore the computational viability of Kyber and Dilithium, demonstrating that hardware optimizations (e.g., AVX2) significantly enhance performance. These findings highlight the practicality of post-quantum cryptography (PQC) deployment, as even without specialized hardware accelerators, Kyber and Dilithium achieve efficient execution times while maintaining high security." + } + ] + } + ], + "index": 8 + }, + { + "type": "table", + "bbox": [ + 310, + 502, + 565, + 703 + ], + "blocks": [ + { + "bbox": [ + 310, + 468, + 563, + 495 + ], + "lines": [ + { + "bbox": [ + 310, + 468, + 563, + 495 + ], + "spans": [ + { + "bbox": [ + 310, + 468, + 563, + 495 + ], + "type": "text", + "content": "TABLEI KEY AND CIPHERTEXT SIZES AND EXECUTION TIMES (IN MILLSECONDS) FOR ALL PARAMETER SETS OF KYBER." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 310, + 502, + 565, + 703 + ], + "lines": [ + { + "bbox": [ + 310, + 502, + 565, + 703 + ], + "spans": [ + { + "bbox": [ + 310, + 502, + 565, + 703 + ], + "type": "table", + "html": "
<table><tr><td>KYBER 512</td><td></td><td></td><td></td></tr>
<tr><td>Sizes (Bytes)</td><td>Reference (ms)</td><td>AVX2 (ms)</td><td>AVX2 Speedup Rate</td></tr>
<tr><td>sk: 1632</td><td>gen: 0.035</td><td>gen: 0.007</td><td>5.00</td></tr>
<tr><td>pk: 800</td><td>enc: 0.040</td><td>enc: 0.007</td><td>5.71</td></tr>
<tr><td>ct: 768</td><td>dec: 0.052</td><td>dec: 0.008</td><td>6.50</td></tr>
<tr><td>Total</td><td>0.127</td><td>0.022</td><td>5.77</td></tr>
<tr><td>KYBER 768</td><td></td><td></td><td></td></tr>
<tr><td>Sizes (Bytes)</td><td>Reference (ms)</td><td>AVX2 (ms)</td><td>AVX2 Speedup Rate</td></tr>
<tr><td>sk: 2400</td><td>gen: 0.058</td><td>gen: 0.011</td><td>5.27</td></tr>
<tr><td>pk: 1184</td><td>enc: 0.063</td><td>enc: 0.011</td><td>5.73</td></tr>
<tr><td>ct: 1088</td><td>dec: 0.080</td><td>dec: 0.012</td><td>6.67</td></tr>
<tr><td>Total</td><td>0.201</td><td>0.034</td><td>5.91</td></tr>
<tr><td>KYBER 1024</td><td></td><td></td><td></td></tr>
<tr><td>Sizes (Bytes)</td><td>Reference (ms)</td><td>AVX2 (ms)</td><td>AVX2 Speedup Rate</td></tr>
<tr><td>sk: 3168</td><td>gen: 0.089</td><td>gen: 0.015</td><td>5.93</td></tr>
<tr><td>pk: 1568</td><td>enc: 0.092</td><td>enc: 0.015</td><td>6.13</td></tr>
<tr><td>ct: 1568</td><td>dec: 0.113</td><td>dec: 0.017</td><td>6.65</td></tr>
<tr><td>Total</td><td>0.294</td><td>0.047</td><td>6.26</td></tr></table>
", + "image_path": "9c0eacd157e54f2d5f632c6c7f77df6b42ee84b9d3b991b5c9429c568a4961ff.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_body" + } + ], + "index": 10 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 47, + 87, + 305, + 288 + ], + "blocks": [ + { + "bbox": [ + 65, + 51, + 282, + 78 + ], + "lines": [ + { + "bbox": [ + 65, + 51, + 282, + 78 + ], + "spans": [ + { + "bbox": [ + 65, + 51, + 282, + 78 + ], + "type": "text", + "content": "TABLE II PUBLIC KEY AND SIGNATURE SIZES AND EXECUTION TIMES (IN MILLSECONDS) FOR ALL PARAMETER SETS OF DILITHIUM." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 47, + 87, + 305, + 288 + ], + "lines": [ + { + "bbox": [ + 47, + 87, + 305, + 288 + ], + "spans": [ + { + "bbox": [ + 47, + 87, + 305, + 288 + ], + "type": "table", + "html": "
<table><tr><td>DILITHIUM 2</td><td></td><td></td><td></td></tr>
<tr><td>Sizes (Bytes)</td><td>Reference (ms)</td><td>AVX2 (ms)</td><td>AVX2 Speedup Rate</td></tr>
<tr><td>pk: 1312</td><td>gen: 0.094</td><td>gen: 0.026</td><td>3.62</td></tr>
<tr><td>sig: 2420</td><td>sign: 0.445</td><td>sign: 0.077</td><td>5.78</td></tr>
<tr><td></td><td>verify: 0.104</td><td>verify: 0.028</td><td>3.71</td></tr>
<tr><td>Total</td><td>0.643</td><td>0.131</td><td>4.91</td></tr>
<tr><td>DILITHIUM 3</td><td></td><td></td><td></td></tr>
<tr><td>Sizes (Bytes)</td><td>Reference (ms)</td><td>AVX2 (ms)</td><td>AVX2 Speedup Rate</td></tr>
<tr><td>pk: 1952</td><td>gen: 0.167</td><td>gen: 0.045</td><td>3.71</td></tr>
<tr><td>sig: 3293</td><td>sign: 0.665</td><td>sign: 0.120</td><td>5.54</td></tr>
<tr><td></td><td>verify: 0.160</td><td>verify: 0.045</td><td>3.56</td></tr>
<tr><td>Total</td><td>0.992</td><td>0.210</td><td>4.73</td></tr>
<tr><td>DILITHIUM 5</td><td></td><td></td><td></td></tr>
<tr><td>Sizes (Bytes)</td><td>Reference (ms)</td><td>AVX2 (ms)</td><td>AVX2 Speedup Rate</td></tr>
<tr><td>pk: 2592</td><td>gen: 0.253</td><td>gen: 0.070</td><td>3.61</td></tr>
<tr><td>sig: 4595</td><td>sign: 0.840</td><td>sign: 0.144</td><td>5.83</td></tr>
<tr><td></td><td>verify: 0.267</td><td>verify: 0.071</td><td>3.76</td></tr>
<tr><td>Total</td><td>1.360</td><td>0.285</td><td>4.77</td></tr></table>
", + "image_path": "cd5e919056af76267e409d8fb1057479a9f46d5fb8d12f6435f7d32ac2f38b8c.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 62, + 312, + 284, + 335 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 312, + 284, + 335 + ], + "spans": [ + { + "bbox": [ + 62, + 312, + 284, + 335 + ], + "type": "text", + "content": "IV. PERFORMANCE COMPARISON: POST-QUANTUM CRYPTOGRAPHY VS. CLASSICAL CRYPTOGRAPHY" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 45, + 346, + 301, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 346, + 301, + 502 + ], + "spans": [ + { + "bbox": [ + 45, + 346, + 301, + 502 + ], + "type": "text", + "content": "Table III presents a comparative analysis of execution times for post-quantum and classical cryptographic algorithms, evaluated under controlled conditions. Both PQC and classical schemes were tested at different security levels, measured in bits, to assess performance variations. The table includes cryptographic algorithms across multiple security configurations, ensuring a direct comparison of execution times. The evaluation focuses on total execution time, measured in milliseconds, to quantify computational cost across different cryptographic operations. While this analysis highlights execution speed, real-world deployment must also consider additional factors such as memory footprint, communication overhead, and hardware compatibility." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 503, + 301, + 719 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 503, + 301, + 719 + ], + "spans": [ + { + "bbox": [ + 45, + 503, + 301, + 719 + ], + "type": "text", + "content": "Kyber exhibits notable performance advantages over both RSA and ECDH, which are commonly employed for key exchange. Kyber-512, offering 128-bit security, achieves execution times that are approximately three times faster than both RSA-2048 and ECDH(P-256), despite these classical schemes providing lower security guarantees. Even Kyber-1024, the most computationally expensive variant, maintains an execution time that is roughly three times faster than RSA-3072, which offers only 128-bit security. At equivalent security levels, Kyber consistently achieves faster execution times than ECDH(P-256, P-384, P-521) while also providing quantum resistance. These efficiency gains are attributed to Kyber's lattice-based cryptographic foundation, which relies on small polynomials and number-theoretic transforms (NTT) rather than large-number modular exponentiation. This mathematical structure enables faster key generation and encapsulation while maintaining strong security guarantees, particularly against quantum adversaries." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 307, + 49, + 564, + 252 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 49, + 564, + 252 + ], + "spans": [ + { + "bbox": [ + 307, + 49, + 564, + 252 + ], + "type": "text", + "content": "Dilithium demonstrates significant computational advantages over ECDSA, a widely used classical digital signature scheme. 
At the 128-bit security level, Dilithium-2 executes signature operations approximately " + }, + { + "bbox": [ + 307, + 49, + 564, + 252 + ], + "type": "inline_equation", + "content": "20\\%" + }, + { + "bbox": [ + 307, + 49, + 564, + 252 + ], + "type": "text", + "content": " faster than ECDSA(P-256), with the performance gap increasing at higher security levels. Dilithium-5, the highest-security variant, achieves nearly twice the execution speed of ECDSA(P-512) at the 256-bit security level. A distinct characteristic of Dilithium is that signature generation dominates execution time, accounting for over " + }, + { + "bbox": [ + 307, + 49, + 564, + 252 + ], + "type": "inline_equation", + "content": "60\\%" + }, + { + "bbox": [ + 307, + 49, + 564, + 252 + ], + "type": "text", + "content": " of the total runtime, whereas ECDSA exhibits a more balanced distribution between signing and verification. This difference arises from Dilithium's structured lattice sampling, which, while computationally intensive, remains more efficient than ECDSA's elliptic curve discrete logarithm operations. Additionally, Dilithium's deterministic signature generation eliminates nonce-related vulnerabilities, a known weakness in ECDSA implementations." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 307, + 253, + 564, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 253, + 564, + 443 + ], + "spans": [ + { + "bbox": [ + 307, + 253, + 564, + 443 + ], + "type": "text", + "content": "The results indicate that post-quantum cryptographic algorithms do not inherently introduce higher computational costs. On the contrary, Kyber and Dilithium frequently outperform classical cryptographic schemes at equivalent security levels. Kyber consistently demonstrates superior efficiency in key exchange operations compared to RSA and ECDH, even at its highest security configuration. Similarly, Dilithium provides a computationally efficient alternative to ECDSA, particularly as security levels increase. While Dilithium's signing operation remains computationally heavier than verification, it still surpasses ECDSA in signature generation across all tested configurations. These findings highlight the feasibility of transitioning to quantum-resistant cryptographic standards in practical applications, demonstrating that enhanced security can be achieved without compromising computational efficiency." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 308, + 444, + 564, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 444, + 564, + 624 + ], + "spans": [ + { + "bbox": [ + 308, + 444, + 564, + 624 + ], + "type": "text", + "content": "These performance findings highlight the computational feasibility of Kyber and Dilithium as post-quantum cryptographic solutions, demonstrating that quantum resistance does not necessarily come at the cost of execution efficiency. However, execution time is only one aspect of cryptographic feasibility. While our controlled benchmarking showed that Kyber and Dilithium outperform classical schemes in speed, these results were obtained under optimized and isolated conditions. Real-world deployment involves additional complexities, such as infrastructure constraints, interoperability with existing systems, and operational overhead, which can impact practical performance. 
Thus, while PQC shows strong computational efficiency, its large-scale adoption in telecom networks requires a broader evaluation, considering scalability, integration challenges, and regulatory compliance." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 340, + 632, + 533, + 666 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 340, + 632, + 533, + 666 + ], + "spans": [ + { + "bbox": [ + 340, + 632, + 533, + 666 + ], + "type": "text", + "content": "V. POST-QUANTUM CRYPTOGRAPHY IN TELECOMMUNICATIONS: CHALLENGES, IMPLEMENTATIONS, AND FUTURE OUTLOOK" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 308, + 670, + 564, + 719 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 670, + 564, + 719 + ], + "spans": [ + { + "bbox": [ + 308, + 670, + 564, + 719 + ], + "type": "text", + "content": "Implementing PQC in telecommunications networks presents significant challenges. Telecom operators must upgrade complex, large-scale infrastructures that currently rely on classical encryption, all while maintaining service" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 77, + 85, + 270, + 228 + ], + "blocks": [ + { + "bbox": [ + 56, + 51, + 291, + 77 + ], + "lines": [ + { + "bbox": [ + 56, + 51, + 291, + 77 + ], + "spans": [ + { + "bbox": [ + 56, + 51, + 291, + 77 + ], + "type": "text", + "content": "TABLE III EXECUTION TIME COMPARISON OF POST-QUANTUM AND CLASSICAL CRYPTOGRAPHIC ALGORITHMS." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 77, + 85, + 270, + 228 + ], + "lines": [ + { + "bbox": [ + 77, + 85, + 270, + 228 + ], + "spans": [ + { + "bbox": [ + 77, + 85, + 270, + 228 + ], + "type": "table", + "html": "
<table><tr><td>Algorithm</td><td>Security Level</td><td>Total Time (ms)</td></tr>
<tr><td>Kyber-512</td><td>128-bit</td><td>0.127</td></tr>
<tr><td>Kyber-768</td><td>192-bit</td><td>0.201</td></tr>
<tr><td>Kyber-1024</td><td>256-bit</td><td>0.294</td></tr>
<tr><td>Dilithium-2</td><td>128-bit</td><td>0.643</td></tr>
<tr><td>Dilithium-3</td><td>192-bit</td><td>0.992</td></tr>
<tr><td>Dilithium-5</td><td>256-bit</td><td>1.360</td></tr>
<tr><td>ECDSA(P-256)</td><td>128-bit</td><td>0.801</td></tr>
<tr><td>ECDSA(P-384)</td><td>192-bit</td><td>1.702</td></tr>
<tr><td>ECDSA(P-521)</td><td>256-bit</td><td>2.398</td></tr>
<tr><td>RSA-2048</td><td>112-bit</td><td>0.324</td></tr>
<tr><td>RSA-3072</td><td>128-bit</td><td>0.884</td></tr>
<tr><td>ECDH(P-256)</td><td>128-bit</td><td>0.102</td></tr>
<tr><td>ECDH(P-384)</td><td>192-bit</td><td>0.299</td></tr>
<tr><td>ECDH(P-521)</td><td>256-bit</td><td>0.903</td></tr></table>
", + "image_path": "bd69976a57eb5e31707407c28643d178dd46223bf5a751e795ecb0e0d3d78495.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 251, + 299, + 297 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 251, + 299, + 297 + ], + "spans": [ + { + "bbox": [ + 47, + 251, + 299, + 297 + ], + "type": "text", + "content": "continuity. Key challenges include performance and latency impacts, compatibility with legacy systems, lack of finalized standards, resource and cost constraints, transitional security risks, and vendor readiness issues." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 308, + 107, + 319 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 308, + 107, + 319 + ], + "spans": [ + { + "bbox": [ + 47, + 308, + 107, + 319 + ], + "type": "text", + "content": "A. Challenges" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 324, + 299, + 538 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 324, + 299, + 538 + ], + "spans": [ + { + "bbox": [ + 47, + 324, + 299, + 538 + ], + "type": "text", + "content": "1) Performance Impact on Existing Infrastructure: PQC algorithms require more computational resources and larger key sizes than classical cryptography. Many schemes are at least an order of magnitude slower or produce larger keys and ciphertexts than RSA or ECC, straining network devices [5]. The increased size of PQC keys, signatures, and ciphertexts taxes bandwidth and memory-constrained hardware; for instance, an additional 1 KB in a TLS handshake can increase response time by " + }, + { + "bbox": [ + 47, + 324, + 299, + 538 + ], + "type": "inline_equation", + "content": "1.5\\%" + }, + { + "bbox": [ + 47, + 324, + 299, + 538 + ], + "type": "text", + "content": " [6]. Latency-sensitive telecom applications, such as voice and video, may experience performance degradation due to longer cryptographic operations or larger handshake messages. Operators need to evaluate whether servers, routers, and HSMs can support the increased computational load of PQC, as many may require hardware upgrades specifically for PQC adoption. Especially in radio access networks (RANs) and customer devices with limited processing power, PQC's computational overhead and memory footprint pose a significant deployment challenge." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 540, + 299, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 540, + 299, + 718 + ], + "spans": [ + { + "bbox": [ + 47, + 540, + 299, + 718 + ], + "type": "text", + "content": "2) Interoperability with Legacy Systems: During the transition, not all network elements and partner systems will upgrade to PQC at the same time, raising interoperability issues. If one system uses a PQC-based protocol but the communicating peer does not, secure connections cannot be established [7]. Many telecom protocols use a \"fail secure\" approach, meaning a PQC-enabled node could be cut off from legacy nodes that don't recognize the new algorithms. Due to the interconnected nature of telecom networks, a single non-upgraded component can block migration, creating deployment bottlenecks. A possible solution is hybrid cryptographic modes (combining classical and PQC algorithms), but this adds complexity and requires new protocol standards and careful validation, potentially slowing down the transition. 
To prevent network partitioning, telecom operators must ensure" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 311, + 51, + 562, + 72 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 51, + 562, + 72 + ], + "spans": [ + { + "bbox": [ + 311, + 51, + 562, + 72 + ], + "type": "text", + "content": "PQC upgrades happen in sync across critical systems or remain backward-compatible." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 310, + 73, + 562, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 73, + 562, + 312 + ], + "spans": [ + { + "bbox": [ + 310, + 73, + 562, + 312 + ], + "type": "text", + "content": "3) Standardization and Regulatory Concerns: The telecom industry is highly standardized and regulated, so PQC adoption hinges on mature standards and regulatory guidance. As of 2024, standards bodies like NIST are just publishing the first official PQC algorithm standards [8]. Until international standards (e.g., 3GPP, IETF, ETSI) incorporate PQC, telcos risk adopting interim solutions that might not be interoperable or compliant long-term. There is also regulatory pressure: governments and industry bodies are already setting timelines and mandates for quantum-safe transitions. For example, the U.S. National Security Agency's CNSA 2.0 mandates specific PQC algorithms, aiming all national security systems to be quantum-resistant by 2035. However, inconsistent national strategies pose challenges for global carriers, as many countries have only issued high-level guidance to \"start planning\" with few concrete standards yet. The absence of finalized telecom-specific PQC standards adds uncertainty, requiring operators to closely coordinate with standards organizations to ensure protocols like 5G authentication, IPsec, and TLS integrate PQC effectively." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 310, + 312, + 562, + 515 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 312, + 562, + 515 + ], + "spans": [ + { + "bbox": [ + 310, + 312, + 562, + 515 + ], + "type": "text", + "content": "4) Cost and Resource Allocation: Upgrading a telecom operator's cryptographic infrastructure to PQC is costly and resource-intensive. Many legacy systems lack the processing power, memory, or bandwidth to support PQC, requiring replacement or retrofitting of equipment such as mobile devices, SIM cards, routers, and base stations. This represents a significant capital expense, with costs extending to PQC-capable HSMs, accelerator cards, software updates, staff training, testing, and parallel system operation during the transition. Smaller operators worry that only large carriers can afford early adoption, but as vendors integrate PQC into products, upgrade costs are expected to decrease. Nonetheless, operators need to allocate substantial resources for cryptographic inventory, upgrade planning, and continuous maintenance to ensure a smooth migration. The cost of inaction could be higher—a quantum-broken network may result in regulatory penalties and customer loss, making early investment crucial." 
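As a reference point for the ECDH timings tabulated earlier, key-exchange cost is easy to measure directly. The sketch below is not the paper's benchmark harness; it assumes the pyca/cryptography package, and absolute numbers depend on hardware and library version, so only the relative scaling across curves is meaningful:

```python
# Rough ECDH key-exchange microbenchmark over the NIST curves listed in the
# table above; reports milliseconds per shared-secret derivation.
import time
from cryptography.hazmat.primitives.asymmetric import ec

CURVES = {
    "ECDH(P-256)": ec.SECP256R1(),
    "ECDH(P-384)": ec.SECP384R1(),
    "ECDH(P-521)": ec.SECP521R1(),
}

def bench(curve, iters=200):
    priv = ec.generate_private_key(curve)
    peer_pub = ec.generate_private_key(curve).public_key()
    start = time.perf_counter()
    for _ in range(iters):
        priv.exchange(ec.ECDH(), peer_pub)  # one shared-secret derivation
    return (time.perf_counter() - start) / iters * 1e3  # ms per exchange

for name, curve in CURVES.items():
    print(f"{name}: {bench(curve):.3f} ms/op")
```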
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 310, + 516, + 562, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 516, + 562, + 718 + ], + "spans": [ + { + "bbox": [ + 310, + 516, + 562, + 718 + ], + "type": "text", + "content": "5) Security Risks and Transition Challenges: Transitioning to PQC raises security concerns, as these new algorithms have not been tested in real-world deployments for decades like RSA/ECC. There are risks of undiscovered weaknesses or implementation flaws, and some PQC candidates have already been found vulnerable to cryptanalysis and side-channel attacks during standardization. Ensuring side-channel resistance is critical—cryptographic operations must not leak secrets through timing, power, or memory access patterns. Additionally, PQC introduces complex key management and new failure modes; for example, some digital signature schemes require tracking one-time keys, complicating network authentication. Early deployments have exposed issues, such as network middleware and firewalls failing due to large key exchange messages. Misconfigurations, like hybrid mode errors or certificate management lapses, could introduce vulnerabilities. To mitigate these risks, telecom operators must conduct" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 50, + 299, + 73 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 50, + 299, + 73 + ], + "spans": [ + { + "bbox": [ + 47, + 50, + 299, + 73 + ], + "type": "text", + "content": "extensive testing, use proven implementations, and ensure crypto-agility, allowing algorithm updates when needed." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 73, + 299, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 73, + 299, + 277 + ], + "spans": [ + { + "bbox": [ + 47, + 73, + 299, + 277 + ], + "type": "text", + "content": "6) Vendor Readiness and Supply Chain Considerations: Telecommunications relies on a vast network of vendors for hardware, software, and infrastructure, making PQC adoption a supply chain challenge. Many vendors await finalized standards before integrating PQC, and without support in critical components like SIM cards and routers, full migration is impossible. To address this, telecom operators are updating procurement policies, requiring vendors to support NIST-approved PQC algorithms and crypto-agility. Regulatory bodies may also mandate certification, potentially delaying availability. While some vendors are developing PQC-capable products, widespread readiness will take time. Effective supply chain management and early engagement with suppliers are essential to ensure smooth deployment, coordinated upgrades, and interoperability. Ultimately, achieving a quantum-safe telecom network requires industry-wide collaboration and careful planning." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 285, + 271, + 297 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 285, + 271, + 297 + ], + "spans": [ + { + "bbox": [ + 48, + 285, + 271, + 297 + ], + "type": "text", + "content": "B. 
Successful Implementations and Initiatives of PQC" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 300, + 299, + 394 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 300, + 299, + 394 + ], + "spans": [ + { + "bbox": [ + 47, + 300, + 299, + 394 + ], + "type": "text", + "content": "Despite the challenges, there have been several successful implementations and trials of post-quantum cryptography in telecom contexts. Forward-thinking carriers and technology partners around the world have started to integrate PQC into test networks, demonstrating feasibility and gleaning best practices. Below are a few notable examples and case studies highlighting how PQC deployment is being approached in telecommunications:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 396, + 299, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 396, + 299, + 658 + ], + "spans": [ + { + "bbox": [ + 47, + 396, + 299, + 658 + ], + "type": "text", + "content": "1) SoftBank (Japan) – Hybrid PQC Network Trial: SoftBank Corp., a major mobile operator in Japan, partnered with SandboxAQ to test PQC algorithms in a live network environment. In 2023 they conducted a hybrid encryption trial, combining classical elliptic-curve cryptography with lattice-based post-quantum algorithms on live network traffic [9]. The results were encouraging: the hybrid quantum-safe approach was verified to work on existing 4G/5G infrastructure with minimal performance impact. SoftBank reported that lattice-based PQC algorithms (such as those later standardized by NIST) outperformed other quantum-safe alternatives in their tests, providing strong security with only marginal added latency [10]. By adopting a hybrid approach, SoftBank ensured interoperability with existing systems while enhancing security. Their phased deployment, from lab tests to real-world networks, demonstrated that careful algorithm selection and optimization can mitigate future quantum threats without major performance trade-offs. Collaboration with SandboxAQ helped streamline cryptographic inventory and regulatory compliance. SoftBank continues investing in PQC, positioning early adoption as a competitive advantage in secure telecom infrastructure." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 659, + 299, + 719 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 659, + 299, + 719 + ], + "spans": [ + { + "bbox": [ + 47, + 659, + 299, + 719 + ], + "type": "text", + "content": "2) SK Telecom (South Korea) - PQC in 5G Standalone Network: Another pioneering effort was led by SK Telecom (SKT) in South Korea, in collaboration with Thales. SKT and Thales carried out a groundbreaking test of postquantum cryptography in a real 5G standalone network environ-" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 310, + 49, + 563, + 168 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 49, + 563, + 168 + ], + "spans": [ + { + "bbox": [ + 310, + 49, + 563, + 168 + ], + "type": "text", + "content": "ronment [11]. In this pilot, SKT deployed quantum-resistant encryption to secure subscriber identities and network traffic. They tested 5G USIM cards implementing the CRYSTALS-Kyber key encapsulation algorithm, ensuring authentication remains secure against quantum threats. The trial demonstrated seamless interoperability between PQC-protected SIMs and the core network, with encrypted calls proving quantum-safe communication. 
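The encapsulation flow trialed on those USIM cards follows the standard KEM pattern: the subscriber side publishes a public key, the network encapsulates a fresh shared secret against it, and the subscriber decapsulates. The SKT/Thales implementation is not public; the round trip below is a sketch assuming the open-source liboqs-python binding (`oqs`), where newer releases expose Kyber768 under the name ML-KEM-768:

```python
# CRYSTALS-Kyber encapsulation/decapsulation round trip, sketched with the
# liboqs-python binding (`pip install liboqs-python`). Role names are
# illustrative; the pilot's actual integration details are not published.
import oqs

ALG = "Kyber768"  # "ML-KEM-768" in newer liboqs releases

with oqs.KeyEncapsulation(ALG) as usim:          # e.g., subscriber side
    public_key = usim.generate_keypair()         # public key sent to network
    with oqs.KeyEncapsulation(ALG) as core:      # e.g., core-network side
        ciphertext, ss_core = core.encap_secret(public_key)
    ss_usim = usim.decap_secret(ciphertext)      # subscriber recovers secret
    assert ss_usim == ss_core                    # both ends share one secret
```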
This deployment, one of the first PQC integrations in 5G, underscores the role of carrier-vendor partnerships and informs ongoing standards development." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 310, + 169, + 563, + 336 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 169, + 563, + 336 + ], + "spans": [ + { + "bbox": [ + 310, + 169, + 563, + 336 + ], + "type": "text", + "content": "3) North American Carriers and Initiatives: U.S. and Canadian telecom operators are preparing for PQC, driven by government directives. AT&T plans to be \"quantum ready\" by 2025, with internal pilots testing PQC in VPNs and TLS. While large-scale deployments are pending, mandates for critical infrastructure are pushing adoption. In Canada and Europe, providers like Verizon, Rogers, Deutsche Telecom, and BT are engaged in research and industry collaborations, focusing on PQC for routing, customer data protection, and inter-carrier security. These efforts emphasize crypto-agility—ensuring networks can transition flexibly as standards evolve. Industry groups, including the GSMA Post-Quantum Telco Network Taskforce and 5G Americas, are developing best practices to guide telecom operators through PQC adoption." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 310, + 338, + 563, + 528 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 338, + 563, + 528 + ], + "spans": [ + { + "bbox": [ + 310, + 338, + 563, + 528 + ], + "type": "text", + "content": "Across successful implementations, key best practices have emerged. Conducting a cryptographic inventory helps identify necessary upgrades, while pilot deployments in less constrained environments allow for manageable PQC integration. A hybrid approach, running PQC alongside classical encryption, has been widely adopted to maintain continuity. Vendor collaboration with SIM card providers, router manufacturers, and software vendors is crucial for early integration. These trials also confirm that early PQC deployment safeguards critical data from future quantum threats. While performance impacts are generally manageable with optimized algorithms, some operators have even improved network efficiency by modernizing legacy systems. Overall, these case studies demonstrate that with careful planning and phased execution, telcos can begin inserting quantum-resistant cryptography into their networks today, gaining experience and confidence for broader rollouts." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 311, + 536, + 484, + 547 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 536, + 484, + 547 + ], + "spans": [ + { + "bbox": [ + 311, + 536, + 484, + 547 + ], + "type": "text", + "content": "C. Future Outlook and Recommendations" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 310, + 552, + 563, + 682 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 552, + 563, + 682 + ], + "spans": [ + { + "bbox": [ + 310, + 552, + 563, + 682 + ], + "type": "text", + "content": "The adoption of post-quantum cryptography (PQC) in telecom networks is shifting from isolated pilots to broader deployments as standards solidify and the quantum threat looms. Telecom operators must act now, as waiting until large-scale quantum computers emerge will be too late. 
Security organizations stress the urgency of conducting cryptographic inventories to identify where public-key cryptography is used—such as SIM authentication, SSL/TLS links, and PKI certificates—prioritizing critical assets to mitigate \"harvest now, decrypt later\" risks. Awareness and education are also crucial for leadership and technical teams." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 311, + 683, + 563, + 719 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 683, + 563, + 719 + ], + "spans": [ + { + "bbox": [ + 311, + 683, + 563, + 719 + ], + "type": "text", + "content": "A structured PQC implementation roadmap involves phased deployments, starting with hybrid cryptographic modes alongside classical encryption to maintain compatibility. Initial" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 45, + 49, + 301, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 49, + 301, + 156 + ], + "spans": [ + { + "bbox": [ + 45, + 49, + 301, + 156 + ], + "type": "text", + "content": "transitions should focus on non-customer-facing segments, expanding as standards mature and interoperability improves. Operators must align migration plans with regulatory requirements, ensuring compliance with evolving mandates. From 2024 onward, telecom providers are expected to integrate PQC into technology refresh cycles, with PQC becoming a standard in 5G-Advanced and 6G networks by the late 2020s. The goal is to achieve full quantum resistance in critical infrastructure by the early 2030s." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 45, + 157, + 301, + 276 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 157, + 301, + 276 + ], + "spans": [ + { + "bbox": [ + 45, + 157, + 301, + 276 + ], + "type": "text", + "content": "Within the next decade, PQC will likely be as integral to telecom security as TLS and IPsec are today. Once NIST and other bodies finalize standards by 2024-2025, adoption will accelerate, giving early adopters a competitive edge with \"quantum-safe\" services. Given the uncertainty of quantum computing advancements, proactive preparation is essential. Encouragingly, PQC adoption does not necessarily require hardware replacements—many transitions can be done via software updates, reducing costs. As vendors integrate PQC into products, expenses are expected to decrease further." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 45, + 276, + 300, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 276, + 300, + 373 + ], + "spans": [ + { + "bbox": [ + 45, + 276, + 300, + 373 + ], + "type": "text", + "content": "By 2030, much of global telecom traffic, particularly sensitive communications, will likely be encrypted using post-quantum or hybrid cryptographic schemes. Collaboration among telecom operators, governments, and the security community will be crucial for interoperability and resilience. With proactive planning and cooperative execution, the telecom industry can secure global communications against quantum threats while maintaining security, efficiency, and compliance." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 133, + 376, + 212, + 387 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 376, + 212, + 387 + ], + "spans": [ + { + "bbox": [ + 133, + 376, + 212, + 387 + ], + "type": "text", + "content": "VI. 
CONCLUSION" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 390, + 300, + 509 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 390, + 300, + 509 + ], + "spans": [ + { + "bbox": [ + 45, + 390, + 300, + 509 + ], + "type": "text", + "content": "The transition to post-quantum cryptography (PQC) is no longer a theoretical consideration but an imminent necessity for securing digital communications against future quantum threats. This study has demonstrated that CRYSTALS-Kyber and CRYSTALS-Dilithium, the NIST-standardized PQC algorithms, not only provide robust quantum resistance but also achieve competitive execution times compared to classical cryptographic schemes. Benchmarking results highlight their computational efficiency, particularly when optimized with AVX2 vectorization." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 45, + 510, + 301, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 510, + 301, + 628 + ], + "spans": [ + { + "bbox": [ + 45, + 510, + 301, + 628 + ], + "type": "text", + "content": "However large-scale deployment in telecommunications networks introduces critical challenges, including infrastructure upgrades, interoperability concerns, regulatory compliance, and cost constraints. The successful implementation of PQC in telecom environments requires a structured, phased migration strategy, leveraging hybrid cryptographic approaches to maintain compatibility with legacy systems. Early industry trials demonstrate the viability of PQC adoption while emphasizing the importance of vendor collaboration, cryptographic agility, and thorough performance validation." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 45, + 629, + 301, + 653 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 629, + 301, + 653 + ], + "spans": [ + { + "bbox": [ + 45, + 629, + 301, + 653 + ], + "type": "text", + "content": "Looking ahead, PQC is expected to become a fundamental component of telecom security, with adoption accelerating as" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 307, + 49, + 564, + 182 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 49, + 564, + 182 + ], + "spans": [ + { + "bbox": [ + 307, + 49, + 564, + 182 + ], + "type": "text", + "content": "standards solidify and regulatory mandates take effect. As quantum computing advances remain unpredictable, proactive preparation is essential to mitigate risks associated with delayed migration. Encouragingly, the ongoing integration of PQC into security protocols for 5G and 6G networks, along with continued industry cooperation, ensures that telecom infrastructure remains resilient against emerging cryptographic threats. With careful planning and strategic execution, the transition to quantum-safe cryptography can safeguard telecom networks, ensuring their security and adaptability in the quantum era." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 406, + 191, + 466, + 202 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 406, + 191, + 466, + 202 + ], + "spans": [ + { + "bbox": [ + 406, + 191, + 466, + 202 + ], + "type": "text", + "content": "REFERENCES" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 311, + 209, + 578, + 652 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 315, + 209, + 563, + 246 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 209, + 563, + 246 + ], + "spans": [ + { + "bbox": [ + 315, + 209, + 563, + 246 + ], + "type": "text", + "content": "[1] D. Joseph, R. Misoczki, M. Manzano, J. Tricot, F. D. Pinuaga, O. Lacombe, S. Leichenauer, J. Hiday, P. Venables, and R. Hansen, \"Transitioning organizations to post-quantum cryptography,\" Nature, vol. 605, no. 7909, pp. 237–243, 2022." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 315, + 247, + 563, + 264 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 247, + 563, + 264 + ], + "spans": [ + { + "bbox": [ + 315, + 247, + 563, + 264 + ], + "type": "text", + "content": "[2] D. J. Bernstein and T. Lange, \"Post-quantum cryptography,\" Nature, vol. 549, no. 7671, pp. 188-194, 2017." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 315, + 265, + 563, + 300 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 265, + 563, + 300 + ], + "spans": [ + { + "bbox": [ + 315, + 265, + 563, + 300 + ], + "type": "text", + "content": "[3] G. Alagic, G. Alagic, J. Alperin-Sheriff, D. Apon, D. Cooper, Q. Dang, Y.-K. Liu, C. Miller, D. Moody, R. Peralta et al., \"Status report on the first round of the NIST post-quantum cryptography standardization process,\" 2019." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 315, + 300, + 563, + 337 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 300, + 563, + 337 + ], + "spans": [ + { + "bbox": [ + 315, + 300, + 563, + 337 + ], + "type": "text", + "content": "[4] National Institute of Standards and Technology, \"Post-Quantum Cryptography Standardization,\" 2024, accessed: 2024-03-17. [Online]. Available: https://csrc.nist.gov/projects/post-quantum-cryptography/selected-algorithm" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 315, + 337, + 563, + 381 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 337, + 563, + 381 + ], + "spans": [ + { + "bbox": [ + 315, + 337, + 563, + 381 + ], + "type": "text", + "content": "[5] GSM Association, \"Post Quantum Cryptography - Guidelines for Telecom Use Cases,\" GSM Association, Technical Report PQ.03, February 2024, accessed: 2024-03-17. [Online]. Available: https://www.gsma.com/newsroom/wp-content/uploads/PQ.03-Post-Quantum-Cryptography-Guidelines-for-Telecom-Use-v1.0.pdf." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 315, + 382, + 578, + 408 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 382, + 578, + 408 + ], + "spans": [ + { + "bbox": [ + 315, + 382, + 578, + 408 + ], + "type": "text", + "content": "[6] PKI Consortium, \"Key takeaways of the PQC conference in Austin,\" January 30, 2025, accessed: 2025-03-17. [Online]. Available: https://pkic.org/2025/01/30/key-takeaways-of-the-pqc-conference-in-austin/." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 315, + 409, + 563, + 472 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 409, + 563, + 472 + ], + "spans": [ + { + "bbox": [ + 315, + 409, + 563, + 472 + ], + "type": "text", + "content": "[7] U. Government, \"Report on post-quantum cryptography,\" Government Report, The White House, Washington, D.C., Tech. Rep. REF PQC-Report FINAL Send, July 2024, presented to the Senate Committee on Homeland Security and Governmental Affairs and the House Committee on Oversight and Accountability. [Online]. Available: https://bidenwhitehouse.archives.gov/wp-content/uploads/2024/07/REF_PQC-Report_FINAL_Send.pdf" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 315, + 472, + 563, + 508 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 472, + 563, + 508 + ], + "spans": [ + { + "bbox": [ + 315, + 472, + 563, + 508 + ], + "type": "text", + "content": "[8] J. Taaffe, \"Are telcos ready for a quantum leap?\" June 2023, accessed: March 17, 2025. [Online]. Available: https://inform.tmforum.org/features-and-opinion/are-tercos-making-a-quantum-leap." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 315, + 509, + 563, + 570 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 509, + 563, + 570 + ], + "spans": [ + { + "bbox": [ + 315, + 509, + 563, + 570 + ], + "type": "text", + "content": "[9] SoftBank Corp. and SandboxAQ, \"SoftBank Corp. and SandboxAQ to Jointly Implement Next-Generation Cryptosystem Resilient to Cyber Attacks from Quantum Computers,\" March 2022, press Release, accessed: March 17, 2025. [Online]. Available: https://www.sandboxaq.com/press/softbank-corp-and-sandbox-aq-to-jointly-implement next-generation-cryptosystem-resilient-to-cyber-attacks-from-quantum-computers." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 311, + 571, + 563, + 607 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 571, + 563, + 607 + ], + "spans": [ + { + "bbox": [ + 311, + 571, + 563, + 607 + ], + "type": "text", + "content": "[10] SoftBank Corp., \"SoftBank Corp. and SandboxAQ Jointly Verify Hybrid Mode Quantum-safe Technology,\" February 2023, blog Post, accessed: March 17, 2025. [Online]. Available: https://www.softbank.jp/en/corp/technology/research/story-event/008/." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 311, + 607, + 563, + 652 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 607, + 563, + 652 + ], + "spans": [ + { + "bbox": [ + 311, + 607, + 563, + 652 + ], + "type": "text", + "content": "[11] Thales Group and SK Telecom, \"Thales and SK Telecom: Pioneering Quantum-Resistant Cryptography for 5G Networks,\" 2024, accessed: March 17, 2025. [Online]. Available: https://www.thalesgroup.com/en/markets/digital-identity-and-security/mobile/5G-skt-post-quantum-user-case." 
+ } + ] + } + ], + "index": 19 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2503_12xxx/2503.12972/66f6c6f6-89fc-4fc3-8936-54da38b8d574_content_list.json b/data/2025/2503_12xxx/2503.12972/66f6c6f6-89fc-4fc3-8936-54da38b8d574_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..f4f6e01021af7516d6e9c5945b6d9baeacdfd2b4 --- /dev/null +++ b/data/2025/2503_12xxx/2503.12972/66f6c6f6-89fc-4fc3-8936-54da38b8d574_content_list.json @@ -0,0 +1,2235 @@ +[ + { + "type": "text", + "text": "Aligning Vision to Language: Annotation-Free Multimodal Knowledge Graph Construction for Enhanced LLMs Reasoning", + "text_level": 1, + "bbox": [ + 106, + 128, + 890, + 176 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Junming Liu $^{1,2}$ , Siyuan Meng $^{2,3}$ , Yanting Gao $^{1}$ , Song Mao $^{2}$ , Pinlong Cai $^{2}$ ,", + "bbox": [ + 204, + 202, + 790, + 220 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Guohang Yan $^{2}$ , Yirong Chen $^{2,4}$ , Zilin Bian $^{5}$ , Ding Wang $^{2*}$ , Botian Shi $^{2}$", + "bbox": [ + 222, + 220, + 774, + 239 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ Tongji University $^{2}$ Shanghai Artificial Intelligence Laboratory", + "bbox": [ + 243, + 239, + 754, + 257 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{3}$ East China Normal University $^{4}$ Stanford University $^{5}$ New York University", + "bbox": [ + 187, + 256, + 808, + 273 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "liu_junming6917@tongji.edu.cn wangding@pjlab.org.cn", + "bbox": [ + 264, + 277, + 720, + 292 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 246, + 325, + 326, + 342 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Multimodal reasoning in Large Language Models (LLMs) struggles with incomplete knowledge and hallucination artifacts, challenges that textual Knowledge Graphs (KGs) only partially mitigate due to their modality isolation. While Multimodal Knowledge Graphs (MMKGs) promise enhanced cross-modal understanding, their practical construction is impeded by semantic narrowness of manual text annotations and inherent noise in visual-semantic entity linkages. In this paper, we propose Vision-align-to-Language integrated Knowledge Graph (VaLiK), a novel approach for constructing MMKGs that enhances LLMs reasoning through cross-modal information supplementation. Specifically, we cascade pre-trained Vision-Language Models (VLMs) to align image features with text, transforming them into descriptions that encapsulate image-specific information. Furthermore, we developed a cross-modal similarity verification mechanism to quantify semantic consistency, effectively filtering out noise introduced during feature alignment. Even without manually annotated image captions, the refined descriptions alone suffice to construct the MMKG. Compared to conventional MMKGs construction paradigms, our approach achieves substantial storage efficiency gains while maintaining direct entity-to-image linkage capability. Experimental results on multimodal reasoning tasks demonstrate that LLMs augmented with VaLiK outperform previous state-of-the-art models. 
Our code is published at https://github.com/Wings-Of-Disaster/VaLiK.", + "bbox": [ + 88, + 357, + 483, + 765 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 91, + 792, + 220, + 808 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recent advancements in Large Language Models (LLMs) [2, 10, 26, 66] have demonstrated their superiority and versatility across various Natural Language Reasoning (NLR) tasks [9, 44, 54, 59]. To enhance LLMs into the", + "bbox": [ + 89, + 818, + 483, + 878 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/4f0401946432d33d9c1cf582ceba501170762e9bc5154f160d35a6b7809d9e45.jpg", + "image_caption": [ + "Figure 1. (a) Training entity extraction models relies on extensive fine-grained annotations, increasing labeling costs. More examples are provided in Appendix B. (b) Capturing implicit semantic associations demands abstract comprehension or logical inference." + ], + "image_footnote": [], + "bbox": [ + 517, + 327, + 901, + 685 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "realm of multimodal reasoning, researchers [65, 72, 75, 80] have endeavored to equip these models with multimodal capabilities, as evidenced by advancements in Multimodal Large Language Models (MLLMs) such as BLIP-2 [41], GPT-4o [33], Janus-Pro [14], among others. Despite their notable progress, these models often experience hallucinations [5, 35], primarily arising from knowledge deficiencies due to incomplete or obsolete information.", + "bbox": [ + 511, + 779, + 906, + 900 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2503.12972v3 [cs.CV] 21 Nov 2025", + "bbox": [ + 22, + 273, + 57, + 724 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Corresponding author.", + "bbox": [ + 109, + 887, + 235, + 898 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 924, + 503, + 935 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Fine-tuning LLMs demands prohibitive computational costs [32]. While text-based Knowledge Graphs (KGs) have partially addressed this limitation by efficient real-time updates [6, 63, 73], they are still restricted by modal isolation, which hinders cross-modal reasoning, as detailed in Appendix A. To bridge this semantic fragmentation, Multimodal Knowledge Graphs (MMKGs) have been developed as unified representational frameworks [11, 34, 39, 46].", + "bbox": [ + 89, + 90, + 480, + 210 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "However, constructing robust MMKGs faces two primary obstacles [16, 90]. First, the lack of large-scale fine-grained entity-image corpora makes it infeasible to train high-quality entity extractors, significantly constraining scalability, as illustrated in Figure 1a. Second, conventional visual relation detectors primarily identify superficial spatial interactions instead of semantic relations consistent with KGs, while frequently hallucinating implausible connections that corrupt graph integrity, as shown in Figure 1b.", + "bbox": [ + 89, + 214, + 480, + 351 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this paper, we propose VaLiK, short for Vision-align-to-Language integrated Knowledge Graph, a novel framework designed to empower LLMs with advanced multimodal reasoning. 
Unlike traditional methods that rely on text annotations for training extraction models and the knowledge construction process [55], VaLiK adopts a annotation-free approach to MMKGs construction. Specifically, we first employ several pretrained Vision-Language models (VLMs), designed based on Chain-of-Experts (CoE) principles [74], to convert visual inputs into image-specific textual descriptions through cross-modal feature alignment. This procedure eliminates the need for manually annotated image captions in both the knowledge extraction and construction phases while preserving visual details typically missing in generic text descriptions. Moreover, in contrast to existing relation detection methods that require predefined label taxonomies [17, 61, 82, 85], VaLiK excels at extracting profound semantic relationships that are both KG-compatible and capture novel associations beyond training supervision. While VLMs enable cross-modal reasoning and interpretation, they introduce spurious relational noise through hallucinated inter-modal attributions, as depicted in Figure 2. We address this limitation through cross-modal similarity recalibration, strategically filtering inconsistent information while preserving valid semantic correspondences. Finally, the purified descriptions are systematically organized into MMKGs via LLM-driven symbolic structuring [28], bridging visual and textual domains with factual consistency.", + "bbox": [ + 89, + 354, + 482, + 792 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To thoroughly evaluate the VaLiK method, we conduct a comprehensive assessment across two critical multimodal benchmarks: multimodal classification (tested on the CrisisMMD dataset [3]) and multimodal question answering (evaluated via the ScienceQA benchmark [48]). The experiments span diverse LLM architectures and MMKG construction techniques to ensure the framework's robustness.", + "bbox": [ + 89, + 795, + 480, + 900 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/0565a1818d28d5c065a7ad32bab646f9d5e0994ab7c5dbfd84d22b45087b7151.jpg", + "image_caption": [ + "Figure 2. Feature-aligned descriptions from VLMs introduce redundant and inaccurate relationship patterns." + ], + "image_footnote": [], + "bbox": [ + 517, + 92, + 901, + 262 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The experimental results demonstrate that the MMKGs constructed by VaLiK achieve superior multimodal reasoning performance in LLMs while requiring substantially less storage than conventional approaches. More importantly, the proposed approach retains direct entity-to-image linkage capabilities even with the compressed graph structure.", + "bbox": [ + 511, + 316, + 903, + 407 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In summary, VaLiK is the first framework that enables end-to-end, annotation-free, zero-shot, and storage-efficient multimodal knowledge construction with high adaptability and scalability. 
Our key contributions include:", + "bbox": [ + 511, + 409, + 903, + 469 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- To the best of our knowledge, VaLik is the first end-to-end framework to build Annotation-Free MMKGs to improve LLMs' multimodal reasoning capabilities, effectively eliminating the need for manually annotated textual material and enabling a completely autonomous multimodal knowledge generation process.", + "- We offer an innovative zero-shot method for constructing MMKG that captures deep semantic connections beyond traditional predetermined labels with an effective verification system that guarantees the accuracy of these relationships. The knowledge distillation paradigm greatly decreases storage while maintaining semantic integrity.", + "- We develop a highly modular and extensible architecture that allows VaLiK to effortlessly incorporate new models and workflows for specialized domain tasks, facilitating rapid adaptation to diverse application scenarios without incurring expensive system changes." + ], + "bbox": [ + 513, + 470, + 903, + 728 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 513, + 744, + 653, + 760 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1. Multimodal Knowledge Graphs", + "text_level": 1, + "bbox": [ + 511, + 771, + 792, + 787 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The principal advantage of MMKGs resides in their multimodal integration beyond conventional KGs. By linking entities with corresponding visual or textual data, MMKGs introduce valuable visual and textual information to the knowledge base, substantially advancing multimodal reasoning capabilities. This combination addresses core challenges in tasks that inherently demand multimodal synergy", + "bbox": [ + 511, + 794, + 903, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 924, + 503, + 935 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "like autonomous driving [27, 29], image-text retrieval [24, 87] and robotic manipulation [52, 58]. However, constructing trustworthy MMKGs with minimal manual effort remains a critical challenge. Recent studies have proposed innovative strategies to enhance MMKG reliability and utility. For instance, Chen et al. [13] proposed MSPT, a framework addressing continual MMKG construction through gradient modulation for balanced multimodal learning and attention distillation to mitigate catastrophic forgetting. Song et al. [61] developed Scene-MMKG, integrating knowledge engineering with large language models to improve robotic manipulation by resolving data sparsity and knowledge uncertainty. Wang et al. [70] introduced TIVA-KG, the first quad-modal knowledge graph spanning text, image, video, and audio with triplet grounding, empirically validating its effectiveness in downstream tasks. While these advances enhance multimodal reasoning capabilities, their efficacy remains rooted in resource-intensive paradigms, requiring extensively annotated datasets for knowledge acquisition.", + "bbox": [ + 89, + 90, + 480, + 378 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.2. 
Knowledge-Augmented Multimodal Learning", + "text_level": 1, + "bbox": [ + 89, + 388, + 478, + 406 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Multimodal learning has seen significant progress in aligning and integrating information across different data modalities [7, 45, 76]. The incorporation of structured knowledge through MMKGs further enhances these approaches, improving the reasoning capabilities and generalization across a variety of domains, such as visual question answering [51, 60, 68], recommendation systems [18, 62, 71], and classification [31, 56, 84]. Methods like GraphAdapter's dual-KG adaptation [42] and contrastive multi-relational encoding with KGs [23] inject external knowledge into models, refining their performance and improving their capability to handle complex tasks. Additionally, Lee et al. [39] proposed MR-MKG, a novel framework that constructs task-specific MMKGs to enhance multimodal reasoning in LLMs. These knowledge-augmented paradigms demonstrate superior cross-modal semantic grounding compared to unimodal approaches [15, 36]. However, their reliance on preconstructed MMKGs often leads to domain discrepancies, where generic knowledge schemas misalign with task-specific reasoning patterns, ultimately limiting contextual precision in target applications.", + "bbox": [ + 89, + 412, + 482, + 731 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.3. Multimodal Large Language Models", + "text_level": 1, + "bbox": [ + 89, + 741, + 410, + 758 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The limitations of text-only LLMs in meeting increasingly complex demands have spurred extensive research [79, 83, 86] into developing LLMs capable of effectively processing and reasoning over multimodal inputs. Current research predominantly employs adapter or projection layers to connect the embedding spaces of various modality-specific encoders with the textual embedding space of LLMs [39]. For instance, foundational models like CLIP [57] and BLIP [40] pioneered cross-modal alignment by jointly training vision", + "bbox": [ + 89, + 763, + 482, + 902 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "and text encoders to map images and text into a shared embedding space. Building on this, LLaVA [43] and Flamingo [4] advanced the field by integrating visual encoders with LLMs, enabling more nuanced multimodal understanding and generation. More recently, Gemini [64], Qwen2-VL [69] and GPT-4o [33] have further pushed the boundaries by scaling up multimodal pretraining and introducing sophisticated mechanisms for cross-modal interaction. However, multimodal LLMs remain prone to hallucinations. While they enhance cross-modal alignment, they neither acquire new knowledge nor avoid introducing noise through integration. To address these limitations, VaLiK \"uses the master's tools to refine the master's craft,\" first constructing MMKGs via MLLMs and then leveraging them to enhance MLLMs' reasoning capabilities.", + "bbox": [ + 511, + 90, + 906, + 319 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Method", + "text_level": 1, + "bbox": [ + 511, + 332, + 604, + 347 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In this section, we present the technical details of VaLiK. VaLiK introduces a novel expansion-reduction paradigm for visual knowledge extraction. 
The architecture initially organizes several VLMs with distinct knowledge domains, designed based on CoE principles [74], to produce comprehensive textual descriptions encompassing hierarchical visual details. A cross-modal similarity verification mechanism then iteratively filters out noisy tokens through cross-modal alignment while preserving semantically salient elements. This optimization-style approach eliminates external textual dependencies while enabling effective MMKG construction. VaLiK's framework is shown in Figure 3.", + "bbox": [ + 511, + 357, + 906, + 540 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. CoE-based Visual to Language Modeling", + "text_level": 1, + "bbox": [ + 511, + 549, + 864, + 566 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Recent entity detection techniques [20, 81, 91] have been widely adopted for entity and relation extraction in MMKG construction. However, these methods are inherently limited by predefined categorical boundaries, lacking the capacity to recognize visual concepts outside their training vocabulary. In contrast, VLMs pretrained on web-scale corpora [12, 41, 89] exhibit broader recognition capabilities through exposure to diverse visual concepts.", + "bbox": [ + 511, + 571, + 905, + 691 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We therefore leverage pretrained VLMs to extract comprehensive visual information. This process removes the necessity for detailed fine-grained data typically required to train specialized recognition models. The generalized vision to language conversion pipeline can be formalized as:", + "bbox": [ + 511, + 691, + 905, + 768 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nS = \\mathcal {D} _ {\\text {t e x t}} \\left(\\mathcal {A} \\left(\\mathcal {E} _ {\\text {v i s}} (I)\\right)\\right), \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 625, + 777, + 906, + 804 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $I$ denotes for the input image, $\\mathcal{E}_{\\mathrm{vis}}$ denotes the visual encoder extracting visual features, $\\mathcal{A}$ carries out cross-modal feature alignment and interaction, and $\\mathcal{D}_{\\mathrm{text}}$ generates textual tokens through autoregressive decoding. The resulting visual description $S = \\{w_{1},\\dots,w_{n}\\}$ emerges from this multi-stage processing.", + "bbox": [ + 511, + 809, + 905, + 901 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 924, + 504, + 935 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/3ae9a45583be9946e86dc7de188a71f381a9113b43215237e1f028a2f67cfac2.jpg", + "image_caption": [ + "Figure 3. The pipeline of VaLiK: First, large-scale visual descriptions are generated using CoE-based VLMs. Then, a similarity verification mechanism is used to prune irrelevant information. Finally, MMKGs are constructed using LLMs based on LightRAG. The constructed MMKGs can assist LLMs in multimodal reasoning, alleviating the hallucination issues caused by incomplete knowledge." + ], + "image_footnote": [], + "bbox": [ + 93, + 89, + 903, + 300 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "However, quantitative analysis uncovers considerable discrepancies between machine-generated and human-annotated descriptions [88]. As an illustration, while utilizing BLIP-2 [41] to generate sample captions, we noted that the model outputs are markedly concise and devoid of visual specifics, as detailed in Appendix C. 
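To make the pipeline of Eq. (1) concrete, a single expert step can be sketched with an off-the-shelf BLIP-2 checkpoint from Hugging Face; the checkpoint name, prompt wording, and image path are illustrative rather than the paper's exact configuration. Chaining calls so that each expert sees the previous description anticipates the CoE cascade formalized next in Eq. (2):

```python
# Minimal sketch of Eq. (1): image -> visual encoding -> cross-modal
# alignment -> autoregressive text decoding, via a pretrained BLIP-2
# checkpoint. Feeding each description into the next expert's prompt
# yields the cascade of Eq. (2).
from PIL import Image
from transformers import Blip2ForConditionalGeneration, Blip2Processor

name = "Salesforce/blip2-opt-2.7b"  # illustrative checkpoint choice
processor = Blip2Processor.from_pretrained(name)
model = Blip2ForConditionalGeneration.from_pretrained(name)

def describe(image: Image.Image, prior_description: str = "") -> str:
    # The previous expert's output S_{i-1} is injected through the prompt.
    prompt = f"Context: {prior_description}\nDescribe the image in detail:"
    inputs = processor(images=image, text=prompt, return_tensors="pt")
    out = model.generate(**inputs, max_new_tokens=80)
    return processor.decode(out[0], skip_special_tokens=True).strip()

image = Image.open("example.jpg")           # hypothetical input path
s1 = describe(image)                        # expert 1
s2 = describe(image, prior_description=s1)  # expert 2 refines expert 1
```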
To bridge this gap, we implement CoE enhanced generation through cascade VLMs processing. At iteration step $t$ , each expert $E_{i}$ receives both the original visual signals $I$ and the contextual output from the preceding expert $E_{i - 1}$ :", + "bbox": [ + 88, + 363, + 483, + 513 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {S} _ {i} ^ {(t)} = E _ {i} \\left(I, \\mathcal {S} _ {i - 1} ^ {(t - 1)}\\right), \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 205, + 526, + 482, + 554 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $S_{i - 1}^{(t - 1)}$ denotes the description from expert $E_{i - 1}$ at step $t - 1$ , with $S_0^{(t)}\\coloneqq \\emptyset$ for initialization.", + "bbox": [ + 89, + 565, + 482, + 599 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Specifically, each expert $E_{i}$ implements a unified visual-language processing task:", + "bbox": [ + 89, + 601, + 482, + 633 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "1. Visual Feature Extraction:", + "text_level": 1, + "bbox": [ + 89, + 635, + 302, + 648 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {V} _ {i} = \\operatorname {E n c} _ {\\text {v i s}} ^ {i} (I) \\in \\mathbb {R} ^ {d _ {v} \\times N _ {p}}, \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 204, + 661, + 482, + 680 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\mathsf{Enc}_{\\mathrm{vis}}^i$ denotes established visual encoder [21, 30, 47] producing $N_{p}$ patch embeddings with dimension $d_v$ .", + "bbox": [ + 109, + 691, + 482, + 723 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2. Cross-Modal Interaction and Generation:", + "text_level": 1, + "bbox": [ + 89, + 724, + 406, + 738 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "VLMs integrate pretrained learnable query embeddings $\\mathbf{Q}_i\\in \\mathbb{R}^{d_q\\times L_q}$ to interact with visual features $\\mathbf{V}_i\\in$ $\\mathbb{R}^{d_v\\times N_p}$ via cross-attention [67]:", + "bbox": [ + 109, + 739, + 482, + 786 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathbf {H} _ {i} = \\operatorname {C r o s s A t t n} \\left(\\mathbf {Q} _ {i}, \\mathbf {V} _ {i}\\right) \\\\ = \\operatorname {s o f t m a x} \\left(\\frac {\\mathbf {Q} _ {i} \\mathbf {W} _ {q} ^ {i} \\left(\\mathbf {V} _ {i} \\mathbf {W} _ {k} ^ {i}\\right) ^ {\\top}}{\\sqrt {d _ {k}}}\\right) \\mathbf {V} _ {i} \\mathbf {W} _ {v} ^ {i}, \\tag {4} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 135, + 796, + 482, + 858 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\mathbf{W}_q^i\\in \\mathbb{R}^{d_q\\times d_k}$ , $\\mathbf{W}_k^i$ , $\\mathbf{W}_v^i\\in \\mathbb{R}^{d_v\\times d_k}$ , and $L_{q}$ denotes the predefined query length. Cross-attention serves", + "bbox": [ + 109, + 868, + 483, + 901 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "as a prevalent approach, while other interaction strategies coexist [4]. The adopted VLMs in our implementation primarily rely on this approach for modality fusion.", + "bbox": [ + 531, + 363, + 903, + 409 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3. 
Text Generation:", + "text_level": 1, + "bbox": [ + 513, + 411, + 658, + 424 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The text encoder $\\mathsf{Enc}_{\\mathrm{text}}^{i}$ first processes the preceding expert's output $S_{i - 1}^{(t - 1)}$ into latent features:", + "bbox": [ + 531, + 424, + 903, + 460 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {P} _ {i} = \\operatorname {E n c} _ {\\text {t e x t}} ^ {i} \\left(S _ {i - 1} ^ {(t - 1)}\\right) \\in \\mathbb {R} ^ {d _ {t} \\times L}. \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 612, + 472, + 903, + 492 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Subsequently, the text decoder $\\mathrm{Dec}_{\\mathrm{text}}^{i}$ synthesizes the final output $S_{i}^{(t)}$ by jointly conditioning on $\\mathbf{P}_i$ and $\\mathbf{H}_i$ :", + "bbox": [ + 532, + 506, + 903, + 540 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {S} _ {i} ^ {(t)} = \\operatorname {D e c} _ {\\text {t e x t}} ^ {i} \\left(\\mathbf {P} _ {i}, \\mathbf {H} _ {i}\\right) = \\left\\{w _ {1} ^ {(t, i)}, \\dots , w _ {m} ^ {(t, i)} \\right\\}. \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 553, + 564, + 903, + 585 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Ultimately, the final textual description $S_N^{(C)}$ is obtained after $C$ iteration steps through $N$ cascaded experts.", + "bbox": [ + 511, + 597, + 903, + 630 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2. Cross-Modal Similarity Verification", + "text_level": 1, + "bbox": [ + 511, + 640, + 823, + 656 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To address noise in VLM-generated captions, we design a sliding window mechanism with semantic consistency verification. This method ensures that only relevant and semantically consistent segments are retained in the final description. Let $W_{k}$ denote the $k$ -th window containing $m$ consecutive tokens $\\{w_{km + 1},\\dots ,w_{(k + 1)m}\\}$ . For each window, we compute its cross-modal similarity score:", + "bbox": [ + 511, + 662, + 906, + 768 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\alpha_ {k} = \\frac {\\operatorname {E n c} _ {\\text {v i s}} (I) \\cdot \\operatorname {E n c} _ {\\text {t e x t}} \\left(W _ {k}\\right)}{\\| \\operatorname {E n c} _ {\\text {v i s}} (I) \\| \\| \\operatorname {E n c} _ {\\text {t e x t}} \\left(W _ {k}\\right) \\|}, \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 594, + 779, + 903, + 814 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\mathsf{Enc}_{vis/text}(\\cdot)$ adopts a lightweight CLIP [59] encoder-decoder with frozen parameters for efficient processing. The similarity score $\\alpha_{k}$ lies within the range [0, 1], with higher values indicating a stronger alignment between the visual and textual information.", + "bbox": [ + 511, + 824, + 906, + 898 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 924, + 504, + 935 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "After calculating the cross-modal similarity for each window, we employ an empirical threshold $\\tau$ to filter out low-similarity windows. This threshold helps to identify and discard noisy or irrelevant sections of the generated caption that do not align well with the visual content, thereby reducing the impact of inaccurate or misleading descriptions. 
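A compact sketch of this verification step, assuming a frozen CLIP checkpoint from Hugging Face and sentence-level windows (the threshold value shown is the one used for CrisisMMD later in the paper); the pruning rule it applies is formalized below in Eq. (8):

```python
# Sketch of the cross-modal similarity verification of Eq. (7): each
# window of the generated description is scored against the image with a
# frozen CLIP encoder, and windows scoring below tau are discarded.
import torch
from PIL import Image
from transformers import CLIPModel, CLIPProcessor

name = "openai/clip-vit-large-patch14"
model = CLIPModel.from_pretrained(name).eval()
processor = CLIPProcessor.from_pretrained(name)

def prune(image: Image.Image, windows: list[str], tau: float = 0.25) -> str:
    inputs = processor(text=windows, images=image,
                       return_tensors="pt", padding=True, truncation=True)
    with torch.no_grad():
        img = model.get_image_features(pixel_values=inputs["pixel_values"])
        txt = model.get_text_features(input_ids=inputs["input_ids"],
                                      attention_mask=inputs["attention_mask"])
    img = img / img.norm(dim=-1, keepdim=True)  # unit-normalize embeddings
    txt = txt / txt.norm(dim=-1, keepdim=True)
    alpha = (txt @ img.T).squeeze(-1)           # cosine score per window
    return " ".join(w for w, a in zip(windows, alpha.tolist()) if a >= tau)
```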
Formally, for each window $W_{k}$ , if $\\alpha_{k} < \\tau$ , the window is discarded as noise. This process effectively prunes windows with low similarity scores, ensuring that only semantically meaningful segments remain. The final denoised description $\\hat{S}$ is obtained by concatenating all windows $W_{k}$ for which $\\alpha_{k} \\geq \\tau$ :", + "bbox": [ + 89, + 90, + 483, + 272 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {S} = \\bigcup_ {\\alpha_ {k} \\geq \\tau} W _ {k}. \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 233, + 279, + 483, + 313 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Our window size $m$ is flexibly determined and generally adapts dynamically to natural sentence segmentation.", + "bbox": [ + 89, + 320, + 483, + 351 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.3. MMKG Construction for Enhanced Reasoning", + "text_level": 1, + "bbox": [ + 89, + 358, + 483, + 375 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "LLMs have become increasingly popular for identifying entities, relationships, and attributes within a corpus, which are then organized into a KG. The strength of LLM-based KG generation lies in its capacity to leverage the vast amount of knowledge encoded within these models, allowing them to detect complex and nuanced patterns across diverse data sources. This approach eliminates the need for manual annotation, enabling a highly scalable and domain-adaptive process suitable for a wide range of applications.", + "bbox": [ + 89, + 380, + 483, + 516 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We begin by refining the generated textual description $\\hat{S}$ (VLM-based information), which is then optionally concatenated with any available external textual knowledge $T$ to form the input for KG generation. This combined input is used to generate MMKGs with the help of a LLM [22, 28], leveraging its capacity for multi-hop reasoning and dynamic knowledge integration.", + "bbox": [ + 89, + 517, + 483, + 622 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {G} = \\operatorname {L L M} (\\hat {S} \\oplus T), \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 212, + 632, + 482, + 657 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\oplus$ denotes optional concatenation based on the availability of $T$ . The resulting graph $\\mathcal{G}$ captures both visual and textual relationships inferred by the LLM.", + "bbox": [ + 89, + 660, + 482, + 705 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We define $\\mathcal{G}$ as a set of triplets:", + "bbox": [ + 109, + 705, + 318, + 720 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {G} = \\{(h, r, t) \\mid h, t \\in \\mathcal {E}, r \\in \\mathcal {R} \\}, \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 174, + 729, + 482, + 747 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\mathcal{E}$ and $\\mathcal{R}$ denote the sets of entities and relations. Entities include objects or concepts from the image or external text, while relations describe connections such as \"is a type of,\" \"part of,\" or \"has property.\" Each triplet $(h,r,t)$ links a head entity $h$ and a tail entity $t$ via relation $r$ .", + "bbox": [ + 89, + 753, + 483, + 830 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Multimodal Reasoning Enhancement. 
To support multimodal reasoning, we retrieve relevant triplets from $\\mathcal{G}$ through structural patterns during LLMs inference:", + "bbox": [ + 89, + 830, + 483, + 876 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {G} _ {q} = \\operatorname {R e t r i e v e} (q, \\mathcal {G}), \\tag {11}\n$$\n", + "text_format": "latex", + "bbox": [ + 215, + 883, + 482, + 902 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\text{Retrieve}(\\cdot)$ denotes a retrieval strategy that identifies subgraphs relevant to the query for reasoning. Detailed retrieval strategies are described in Appendix D.", + "bbox": [ + 511, + 90, + 903, + 136 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The augmented prompt integrates multimodal evidence:", + "bbox": [ + 532, + 137, + 903, + 152 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\np _ {\\mathrm {a u g}} = q \\left\\|\\left(\\bigcup_ {(h, r, t) \\in \\mathcal {G} _ {q}} [ h ] \\rightarrow r \\rightarrow [ t ]\\right). \\right. \\tag {12}\n$$\n", + "text_format": "latex", + "bbox": [ + 589, + 165, + 903, + 200 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Note that we incorporate the storage locations of images in the database during MMKGs construction, enabling the MMKGs to link to visual data. VaLiK enables text-only LLMs to perform multimodal reasoning through $\\mathcal{G}$ 's visual associations, while VLMs refresh knowledge representations by jointly injecting both visual and textual information, significantly mitigating hallucination risks.", + "bbox": [ + 511, + 205, + 906, + 313 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. Experiment", + "text_level": 1, + "bbox": [ + 511, + 327, + 637, + 343 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1. Setups", + "text_level": 1, + "bbox": [ + 511, + 352, + 601, + 369 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Evaluation Datasets. We evaluate VaLiK on two multimodal reasoning benchmarks with distinct characteristics:", + "bbox": [ + 511, + 375, + 903, + 405 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "- CrisisMMD [3]. This real-world disaster response dataset includes around 35,000 noisy social media postings with paired images and text, each annotated for seven catastrophe categories and four severity levels. Its realistic user-generated content with natural noise and implicit modality correlations provides a rigorous testbed for zero-shot adaptation, with good performance indicating practical relevance in real-world crisis scenarios.", + "bbox": [ + 513, + 406, + 903, + 526 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "- ScienceQA [48]. This dataset contains 21,208 multimodal science questions combining textual and visual contexts, with $48.7\\%$ of instances containing images. Questions span physics, chemistry, and biology domains, requiring cross-modal reasoning between textual concepts and visual diagrams. Additionally, ScienceQA offers image captions to aid text-only LLMs in reasoning, allowing a comparison of unimodal approaches.", + "bbox": [ + 513, + 527, + 905, + 647 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Task Formulation. For CrisisMMD, we define three multimodal classification tasks1: (1) binary information relevance filtering, (2) fine-grained humanitarian category recognition, and (3) a consolidated taxonomy with merged categories to reduce label complexity. 
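Returning to the reasoning interface of Eqs. (10)-(12), the mechanics reduce to a triplet store, a retrieval function, and prompt concatenation. The toy sketch below uses naive entity string matching purely for illustration; VaLiK itself relies on LightRAG's hybrid retrieval:

```python
# Toy illustration of Eqs. (10)-(12): the MMKG is a set of (h, r, t)
# triplets; retrieval keeps triplets whose entities surface in the query,
# and matches are serialized as "[h] -> r -> [t]" evidence appended to the
# prompt. The graph contents here are invented for demonstration.
graph = {
    ("flood", "is a type of", "natural disaster"),
    ("flood", "has property", "rising water level"),
    ("rescue boat", "used during", "flood"),
}

def retrieve(query: str, kg: set[tuple[str, str, str]]) -> set[tuple[str, str, str]]:
    q = query.lower()
    return {(h, r, t) for h, r, t in kg if h in q or t in q}

def augment(query: str, kg: set[tuple[str, str, str]]) -> str:
    evidence = "\n".join(f"[{h}] -> {r} -> [{t}]"
                         for h, r, t in sorted(retrieve(query, kg)))
    return f"{query}\nKnowledge:\n{evidence}"

print(augment("Is this flood image about a humanitarian crisis?", graph))
```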
We omit the unimodal damage assessment to focus on multimodal aspects. For ScienceQA, we follow the original evaluation using multiple metrics: question types, contextual modalities, and educational stages. Performance is assessed through accuracy percentage across these categories.", + "bbox": [ + 511, + 648, + 906, + 799 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Baselines. We conduct a comprehensive evaluation of text-only LLMs, multimodal VLMs, and KGs that enhance LLMs in multimodal reasoning.", + "bbox": [ + 511, + 799, + 903, + 844 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "- For CrisisMMD, we compare text-only LLMs using few-shot prompting (LLaMA-2 [66], GPT-4 [2],", + "bbox": [ + 513, + 845, + 903, + 876 + ], + "page_idx": 4 + }, + { + "type": "page_footnote", + "text": "This setting references the repository GitHub and Abavisani et al. [1]", + "bbox": [ + 529, + 886, + 903, + 900 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 924, + 503, + 935 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/4afcd56b1fa4990fa61cf4b7cfa0d337feceab24bf56a82c3aa78cb2285b2360.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Task | LLaMA-2-7B | LLaMA-2-13B | LLaMA-2-70B | GPT-4 | DeepSeek-R1-7B | DeepSeek-R1-8B | DeepSeek-R1-32B | DeepSeek-R1-70B | Qwen2.5-7B | Qwen2.5-32B | Qwen2.5-72B | LightRAG (Text-only) | VaLiK (Image-only) | VaLiK (Text-Image)
Task 1 | 62.32 | 63.80 | 63.15 | 66.83 | 67.23 | 63.31 | 63.61 | 65.53 | 65.04 | 67.28 | 67.95 | 67.49 | 69.52 | 68.90
Task 2 | 18.32 | 21.82 | 28.87 | 47.25 | 26.53 | 25.49 | 24.77 | 21.05 | 44.52 | 46.94 | 50.51 | 45.11 | 49.54 | 50.02
Task 2 Merged | 21.45 | 33.15 | 36.89 | 49.44 | 25.85 | 23.56 | 21.55 | 25.57 | 45.33 | 47.07 | 50.29 | 45.94 | 49.07 | 50.69
", + "bbox": [ + 94, + 88, + 903, + 181 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/daad939622d1365740ad713921e37ab22dcfbea1497d31a40eb87a78fb1f9c75.jpg", + "table_caption": [ + "Table 1. The performance evaluation of text-only LLMs using few-shot prompting without any fine-tuning on the training set. As these models handle text only, test data is formatted as unimodal text for compatibility. In our implementations, both LightRAG and VaLiK adopt Qwen2.5-7B as the base reasoning model. Bold indicates the highest value, and underline indicates the second highest." + ], + "table_footnote": [], + "table_body": "
Task | CLIP ViT-L/14 | LLaVA-7B | LLaVA-13B | LLaVA-34B | BLIP-2 Flan-T5-XL | BLIP-2 OPT | GPT-4o | Qwen2-VL-2B-I | Qwen2-VL-7B-I | Qwen2-VL-72B-I | VaLiK (*) | VaLiK (#) | VaLiK (+) | VaLiK (~)
Task 1 | 43.36 | 54.00 | 60.58 | 56.44 | 61.29 | 38.62 | 68.20 | 47.56 | 62.45 | 65.80 | 60.78 | 68.44 | 61.11 | 68.89
Task 2 | 17.88 | 28.01 | 20.14 | 25.15 | 40.86 | 14.26 | 47.58 | 7.60 | 32.68 | 47.21 | 25.80 | 48.88 | 27.23 | 49.78
Task 2-M | 20.79 | 30.61 | 23.44 | 25.07 | 40.72 | 14.27 | 49.55 | 7.42 | 34.20 | 48.28 | 27.31 | 49.27 | 29.09 | 49.31
", + "bbox": [ + 94, + 246, + 919, + 349 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 2. The performance of multimodal VLMs and KG-enhanced LLMs. The -I suffix denotes instruction-tuned variants. Symbol markers denote KG types and models: the asterisk (*) represents image-only KG with LLaVA-34B, hash (#) indicates image-only KG using Qwen2-VL-72B-I, plus (+) denotes text-image KG with LLaVA-34B, and tilde (\\*) shows text-image KG using Qwen2-VL-72B-I.", + "bbox": [ + 89, + 359, + 906, + 402 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "DeepSeek-R1 [26], Qwen-2.5 [77]) and multimodal VLMs (CLIP [57], LLaVA [43], GPT-4o [33], Qwen2-VL [69], BLIP-2 [41]).", + "bbox": [ + 102, + 428, + 480, + 472 + ], + "page_idx": 5 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- For ScienceQA, we compare models for general domains in zero/few-shot settings, including text-only LLMs (GPT Model [48], CoT [48], DDCoT [86]), multimodal VLMs (LG-VQA [25], LaVIN [50], BLIP-2, CCOT [53], GraphVis [19]) and Tool-LLM Chameleon [49]. These models are not specifically fine-tuned for scientific tasks, ensuring a fair evaluation of generalization capabilities.", + "- We further compare the multimodal reasoning performance of LLMs assisted by KGs, evaluating text-based KGs built with LightRAG [28], and pre-constructed MMKGs such as Visual Genome [38] and Mmkg [46]." + ], + "bbox": [ + 89, + 474, + 482, + 640 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Implementation. For MMKG construction, we design a chain of VLMs including BLIP-2, LLaVA, and Qwen2-VL, with the CLIP-ViT-L/14 for pruning. Stronger or additional VLMs could be employed to enhance performance if more computational resources are available. We use the entire training set as the knowledge base and construct MMKGs from the extracted descriptions based on the LightRAG framework. In comparative experiments, the LightRAG method we evaluate utilizes only textual data, while VaLiK employs two configurations: (1) fully image-generated text descriptions (Image-only), and (2) original text combined with image-generated text (Text-Image). Dynamic window partitioning based on sentence length ensures syntactically coherent pruning results. Similarity thresholds are set to $\\tau = 0.25$ for CrisisMMD and $\\tau = 0.20$ for ScienceQA based on empirical evaluations to balance precision and recall. See Appendix E for selection details. We construct the", + "bbox": [ + 88, + 643, + 482, + 900 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "graph using DeepSeek-R1-70B and implement LightRAG's hybrid retrieval approach with Qwen2.5-7B. For graph construction and multimodal reasoning, we utilize $1 \\times$ NVIDIA A100-80GB GPUs. Task-specific prompts are designed to assist LLMs in multimodal reasoning evaluation.", + "bbox": [ + 511, + 428, + 906, + 503 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2. Main Results", + "text_level": 1, + "bbox": [ + 511, + 513, + 653, + 529 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Multimodal Classification Tasks. We conduct multimodal classification experiments on the CrisisMMD dataset, evaluating both text-only LLMs and multimodal VLMs. Detailed comparative results are provided in Tables 1 and 2. For text-only LLMs, we adopt Qwen2.5-7B as the foundational reasoning model. Remarkably, the VaLiK-enhanced version achieves state-of-the-art (SOTA) performance matching that of the native Qwen2.5-72B model. 
The image-only KG constructed through VaLiK demonstrates an average accuracy improvement of $4.41\\%$ across tasks, with the text-image variant attaining a $4.90\\%$ enhancement. These improvements significantly surpass the $1.22\\%$ gain obtained by LightRAG using textual KG. We further validate VaLiK's cross-scale applicability through evaluations on Qwen2.5-32B and 72B architectures, observing consistent $2.0\\% - 2.5\\%$ improvements. While not as significant as the 7B model's benefits, this shows that models that have substantial prior knowledge benefit less from external knowledge augmentation", + "bbox": [ + 511, + 537, + 906, + 824 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Unlike text-only LLMs that depend on MMKGs for visual understanding, VLMs primarily benefit from KGs integration through outdated knowledge refreshment. Due to the inherent availability of visual features during inference, VaLiK's performance gains for VLMs remain con", + "bbox": [ + 511, + 824, + 905, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 925, + 504, + 936 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/4ed5640ca4dff132eb932d23112990edf9118e03775355bded4484c067665b93.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Method | #T-Param | NAT | SOC | LAN | TXT | IMG | NO | G1-6 | G7-12 | Average
Human [48] | - | 90.23 | 84.97 | 87.48 | 89.60 | 87.50 | 88.10 | 91.59 | 82.42 | 88.40
GPT-4 [43] | - | 84.06 | 73.45 | 87.36 | 81.87 | 70.75 | 90.73 | 84.69 | 79.10 | 82.69
CoT (GPT-3) [48] | 173B | 75.44 | 70.87 | 78.09 | 74.68 | 67.43 | 79.93 | 78.23 | 69.68 | 75.17
CoT (UnifiedQA) [48] | 223M | 71.00 | 76.04 | 78.91 | 66.42 | 66.53 | 81.81 | 77.06 | 68.82 | 74.11
CoT (GPT-4) [49] | 1T+ | 85.48 | 72.44 | 90.27 | 82.65 | 71.49 | 92.89 | 86.66 | 79.04 | 83.99
DDCoT [86] | 175B | 80.15 | 76.72 | 82.82 | 78.89 | 72.53 | 85.02 | 82.86 | 75.21 | 80.15
Chameleon (ChatGPT) [49] | 175B+ | 81.62 | 70.64 | 84.00 | 79.77 | 70.80 | 86.62 | 81.86 | 76.53 | 79.93
LG-VQA (BLIP-2) [25] | - | - | - | - | - | - | - | - | - | 86.32
LaVIN-13B [78] | - | - | - | - | - | - | - | - | - | 77.54
BLIP-2 [78] | - | - | - | - | - | - | - | - | - | 74.17
CCOT | 7B | - | - | - | - | - | - | - | - | 76.84
GraphVis [19] | 7B | - | - | - | - | - | - | - | - | 73.18
Qwen2.5-7B | 7B | 76.20 | 67.83 | 77.27 | 74.49 | 65.79 | 79.02 | 77.72 | 69.35 | 74.72
Qwen2.5-72B | 72B | 79.64 | 67.10 | 84.90 | 77.56 | 65.00 | 87.93 | 80.25 | 74.85 | 78.37
Qwen2.5-7B (Mmkg) [46] | 7B | 73.98 | 66.37 | 78.18 | 71.65 | 64.30 | 79.65 | 76.51 | 68.03 | 73.47
Qwen2.5-7B (Visual Genome) [38] | 7B | 76.78 | 67.04 | 78.09 | 74.05 | 66.19 | 79.72 | 78.08 | 69.68 | 75.08
Qwen2.5-7B (VaLiK Text-only) | 7B | 84.54 | 74.24 | 86.91 | 82.74 | 72.53 | 90.03 | 84.51 | 80.28 | 82.98
Qwen2.5-7B (VaLiK Image-only) | 7B | 79.14 | 71.54 | 79.27 | 77.16 | 69.72 | 83.14 | 80.65 | 73.96 | 78.88
Qwen2.5-7B (VaLiK Text-Image) | 7B | 84.15 | 75.14 | 87.64 | 82.99 | 73.18 | 89.69 | 84.40 | 80.95 | 83.16
Qwen2.5-72B (VaLiK Text-Image) | 72B | 85.61 | 75.93 | 90.27 | 84.40 | 74.17 | 92.33 | 85.79 | 82.98 | 84.77
", + "bbox": [ + 94, + 88, + 903, + 426 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 3. Performance comparison (\\%) on ScienceQA benchmark. #T-Params denotes trainable parameters. Categories: NAT (natural science), SOC (social science), LAN (language), TXT (text context), IMG-Cap (image caption), NO (no context), G1-6 (grades 1-6), G7-12 (grades 7-12). Method groups: (1) Human performance baseline, (2) Zero/Few-shot text-only LLMs, (3) Zero/Few-shot Multimodal VLMs, (4) LLMs enhanced with knowledge graphs for multimodal reasoning.", + "bbox": [ + 89, + 436, + 906, + 494 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "strained compared to text-only counterparts. We separately applied VaLiK enhancement to Qwen2-VL-72B-Instruct and LLaVA-34B, obtaining distinct improvements: LLaVA-34B achieves accuracy gains of $2.41\\%$ (image-only KG) and $3.59\\%$ (text-image KG), while Qwen2-VL-72B-Instruct shows $1.77\\%$ and $2.23\\%$ improvements respectively under identical configurations. These experimental findings collectively demonstrate that VaLiK effectively extracts valuable signals from the training corpus and enables dynamic knowledge injection into VLMs during inference, thereby substantially alleviating hallucination phenomena. The differential improvements between Qwen2-VL-72B-Instruct and LLaVA-34B further validate the framework's adaptability across model architectures.", + "bbox": [ + 88, + 520, + 480, + 731 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Additionally, we analyze the results of LLMs without KG enhancement in the tables, which generally follow the scaling law [37]. However, DeepSeek-R1 shows anomalous behavior. Through testing, we find that its reasoning process may introduce complex information that interferes with its judgment. Furthermore, empirical results show that most baseline models achieve suboptimal performance without fine-tuning. In contrast, VaLiK's automated MMKG construction framework requires no task-specific adaptation yet delivers consistent improvements.", + "bbox": [ + 89, + 732, + 482, + 883 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Multimodal Question Answering Tasks. We evalu", + "bbox": [ + 109, + 885, + 480, + 900 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "ated multimodal QA performance on the ScienceQA benchmark with Qwen2.5-7B and Qwen2.5-72B as base architectures, augmented by four knowledge sources: Mmkg, Visual Genome, text-only LightRAG and VaLiK. Compared to existing zero-shot/few-shot LLMs that not specifically optimized for scientific QA, our VaLiK-enhanced Qwen2.5-72B achieved SOTA performance on $62.5\\%$ of subtasks, demonstrating particular strengths in multimodal reasoning scenarios requiring cross-modal alignment with an average accuracy gain of $6.4\\%$ over baseline models.", + "bbox": [ + 511, + 520, + 903, + 671 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Our study identifies a fundamental imbalance between textual and visual knowledge representations in ScienceQA. Text-only KGs (14k entities, 18k relations) exhibit $8 \\times$ denser structured knowledge than image-only counterparts (3k concepts, 1k relations), explaining visual modality underperformance. Despite this gap, vision-KG-augmented Qwen2.5-7B still attains $4.16\\%$ accuracy gains over its non-enhanced version. Notably, our MMKG requires only 489MB storage for complete storage, while the scene graph component2 of Visual Genome alone occupies 739MB. 
This lightweight construction enables effective reasoning using only textual KG descriptions without raw images in resource-constrained scenarios.", + "bbox": [ + 511, + 672, + 906, + 869 + ], + "page_idx": 6 + }, + { + "type": "page_footnote", + "text": "2Visual Genome", + "bbox": [ + 531, + 886, + 624, + 898 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 924, + 504, + 935 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/4c07c1b06d4321725777668e77059d3bd720c3641b208a370e2a8d113fdb9bbe.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Type | Method | #T-Param | NAT | SOC | LAN | TXT | IMG | NO | G1-6 | G7-12 | Average
Image-Only | Qwen2.5-7B | 7B | 76.20 | 67.83 | 77.27 | 74.49 | 65.79 | 79.02 | 77.72 | 69.35 | 74.72
Image-Only | + CVs | 7B | 80.06 (↑3.86) | 70.30 (↑2.47) | 80.55 (↑3.28) | 78.05 (↑3.56) | 68.43 (↑2.64) | 83.76 (↑4.74) | 81.17 (↑3.45) | 72.71 (↑3.36) | 78.14 (↑3.42)
Image-Only | + SV | 7B | 79.14 (↓0.92) | 71.54 (↑1.24) | 79.27 (↓1.28) | 77.16 (↓0.89) | 69.72 (↑1.29) | 83.14 (↓0.62) | 80.65 (↓0.52) | 73.96 (↑1.25) | 78.88 (↑0.74)
Text-Image | Qwen2.5-7B | 7B | 76.20 | 67.83 | 77.27 | 74.49 | 65.79 | 79.02 | 77.72 | 69.35 | 74.72
Text-Image | + CVs | 7B | 81.88 (↑5.68) | 73.00 (↑5.17) | 84.00 (↑6.73) | 80.55 (↑6.06) | 70.05 (↑4.26) | 87.11 (↑8.09) | 82.01 (↑4.29) | 77.98 (↑8.63) | 80.57 (↑5.85)
Text-Image | + SV | 7B | 84.15 (↑2.27) | 75.14 (↑2.14) | 87.64 (↑3.64) | 82.99 (↑2.44) | 73.18 (↑3.13) | 89.69 (↑2.58) | 84.40 (↑2.39) | 80.95 (↑2.97) | 83.16 (↑2.59)
", + "bbox": [ + 94, + 88, + 906, + 191 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/a0c8d9c34fe5e9db284faa164d7f4985af9b1ae1e7b21f3d54728b3d6c9b0494.jpg", + "table_caption": [ + "Table 4. Ablation study on ScienceQA benchmark (CVs: CoE-based Vision-Language Models; SV: Similarly Verification). Performance metrics include: NAT (natural science), SOC (social science), LAN (language), TXT (text context), IMG (image context), NO (no context), G1-6 (grades 1-6), G7-12 (grades 7-12)." + ], + "table_footnote": [], + "table_body": "
Type | Method | Task 1 (%) | Task 2 (%) | Task 2-Merged (%)
Image-Only | Qwen2.5-7B | 65.04 | 44.52 | 45.33
Image-Only | + CVs | 68.11 (↑3.07) | 47.00 (↑2.48) | 46.95 (↑1.62)
Image-Only | + SV | 69.52 (↑1.41) | 49.54 (↑2.54) | 49.07 (↑2.12)
Text-Image | Qwen2.5-7B | 65.04 | 44.52 | 45.33
Text-Image | + CVs | 68.43 (↑3.39) | 48.61 (↑4.09) | 48.97 (↑3.64)
Text-Image | + SV | 68.90 (↑0.47) | 50.02 (↑1.41) | 50.69 (↑1.72)
", + "bbox": [ + 94, + 263, + 485, + 356 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 5. Ablation study on CrisisMMD with Qwen2.5-7B.", + "bbox": [ + 111, + 375, + 460, + 388 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.3. Ablation Study", + "text_level": 1, + "bbox": [ + 89, + 404, + 243, + 421 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Our ablation studies on CrisisMMD and ScienceQA demonstrate the specific roles of VaLiK's components. As shown in Table 4 and Table 5, the CVs (CoE-based VLM) module improves accuracy across all settings, with average gains of $+3.05\\%$ on CrisisMMD and $+4.63\\%$ on ScienceQA tasks, validating visual descriptions enhance reasoning. However, the SV (Similarly Verification) module exhibits dual effects: it significantly improves CrisisMMD metrics by pruning redundant textual descriptions, yet slightly degrades ScienceQA's image-only natural science reasoning. We hypothesize this discrepancy arises from dataset characteristics: CrisisMMD's generated captions contain substantially more redundant content, whereas ScienceQA's simpler visual scenes yield shorter descriptions. Pruning these shorter descriptions risks over-removal of critical semantics. Furthermore, different types of KGs influence the effectiveness of the components: CVs achieve greater gains in CrisisMMD's text-image fusion as original text provides complementary context, while SV shows reduced effectiveness, likely due to occasional over-pruning of cross-modal linkages. Nevertheless, both modules collectively enhance performance across configurations, demonstrating their synergistic yet context-sensitive nature.", + "bbox": [ + 89, + 426, + 483, + 776 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.4. Further Analysis", + "text_level": 1, + "bbox": [ + 89, + 787, + 258, + 803 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Impact of VLM Quantity and Types. We evaluate the impact of varying quantities and types of VLMs on the CVs module. Our experiments reveal that Qwen2-VL generates the most visual descriptions, followed by LLaVA, while BLIP-2 produces the fewest. However, BLIP-2 demonstrates superior capability in extracting critical information", + "bbox": [ + 89, + 809, + 483, + 902 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/05342ce1e79f74e662270b7c353d9bd93d8429578bb671ee1bbfba5e163509a6.jpg", + "image_caption": [ + "Figure 4. Impact analysis of VLM quantity on CrisisMMD." + ], + "image_footnote": [], + "bbox": [ + 516, + 263, + 710, + 375 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/3790fc9cc3c4a05154d2d0fc160ef6d6d27e8609eb80567e719a7f54aa2e6a58.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 710, + 263, + 903, + 375 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "and identifying key entity relationships within images. We therefore adopt BLIP-2 as the primary model, with LLaVA or Qwen2-VL serving as secondary/tertiary components. Adding more VLMs yields diminishing returns, due to limited entities in current images, though we hypothesize their benefits would increase for complex visual scenes with richer semantic content. This phenomenon is empirically validated by our quantitative results in Figure 4.", + "bbox": [ + 511, + 412, + 906, + 532 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Computational Costs. Due to space limitations, we provide an overview of VaLiK's computational costs in Appendix F. 
Our method is significantly more cost-effective than manual annotation or LLM fine-tuning.", + "bbox": [ + 511, + 536, + 906, + 597 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Conclusion", + "text_level": 1, + "bbox": [ + 511, + 616, + 633, + 632 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Multimodal reasoning in LLMs is constrained by incomplete knowledge and hallucination artifacts, limitations that persist because textual KGs cannot bridge visual-textual semantics due to their modality isolation. To bridge this gap, we propose VaLiK, a framework for constructing MMKGs through vision-language alignment, eliminating dependency on manual annotations while resolving visual-textual semantic inconsistencies. By integrating a cascade of pretrained VLMs and cross-modal verification, VaLiK converts images into structured knowledge while filtering noise. The resulting graphs enhance LLMs' reasoning with minimal storage overhead. Experiments on multimodal reasoning benchmarks show SOTA performance. VaLiK's modular design supports adaptability across domains, offering a scalable solution for autonomous knowledge synthesis. This work advances multimodal AI systems by enabling efficient integration of visual and textual data.", + "bbox": [ + 511, + 643, + 908, + 902 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 924, + 504, + 935 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6. Acknowledgments", + "text_level": 1, + "bbox": [ + 91, + 90, + 269, + 107 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "The research was supported by Shanghai Artificial Intelligence Laboratory, the National Key R&D Program of China (Grant No. 2022ZD0160201) and the Science and Technology Commission of Shanghai Municipality (Grant No. 22DZ1100102).", + "bbox": [ + 89, + 114, + 483, + 191 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 91, + 207, + 187, + 223 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Mahdi Abavisani, Liwei Wu, Shengli Hu, Joel Tetreault, and Alejandro Jaimes. Multimodal categorization of crisis events in social media. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 5", + "[2] Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. Gpt-4 technical report. arXiv preprint arXiv:2303.08774, 2023. 1, 5", + "[3] Firoj Alam, Ferda Ofli, and Muhammad Imran. Crisismmd: Multimodal twitter datasets from natural disasters. Proceedings of the International AAAI Conference on Web and Social Media, 12(1), 2018. 2, 5", + "[4] Jean-Baptiste Alayrac, Jeff Donahue, Pauline Luc, Antoine Miech, Iain Barr, Yana Hasson, Karel Lenc, Arthur Mensch, Katherine Millican, Malcolm Reynolds, Roman Ring, Eliza Rutherford, Serkan Cabi, Tengda Han, Zhitao Gong, Sina Samangooei, Marianne Monteiro, Jacob L Menick, Sebastian Borgeaud, Andy Brock, Aida Nematzadeh, Sahand Sharifzadeh, Mikol aj Binkowski, Ricardo Barreira, Oriol Vinyals, Andrew Zisserman, and Karén Simonyan. Flamingo: a visual language model for few-shot learning. In Advances in Neural Information Processing Systems, pages 23716-23736. Curran Associates, Inc., 2022. 3, 4", + "[5] Razvan Azamfirei, Sapna R Kudchadkar, and James Fackler. Large language models and the perils of their hallucinations. 
Critical Care, 27(1):120, 2023. 1", + "[6] Jinheon Baek, Alham Fikri Aji, and Amir Saffari. Knowledge-augmented language model prompting for zero-shot knowledge graph question answering. In Proceedings of the Annual Meeting of the Association for Computational Linguistics (ACL), 2023. 2", + "[7] Tadas Baltrusaitis, Chaitanya Ahuja, and Louis-Philippe Morency. Multimodal machine learning: A survey and taxonomy. IEEE Transactions on Pattern Analysis and Machine Intelligence, 41(2):423-443, 2019. 3", + "[8] Dawei Chen, Zhixu Li, Binbin Gu, and Zhigang Chen. Multimodal named entity recognition with image attributes and image knowledge. In Database Systems for Advanced Applications: 26th International Conference, DASFAA 2021, Taipei, Taiwan, April 11–14, 2021, Proceedings, Part II 26, pages 186–201. Springer, 2021. 1", + "[9] Jiawei Chen, Hongyu Lin, Xianpei Han, and Le Sun. Benchmarking large language models in retrieval-augmented generation. Proceedings of the AAAI Conference on Artificial Intelligence, 38(16):17754-17762, 2024. 1" + ], + "bbox": [ + 99, + 232, + 483, + 900 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[10] Ting Chen, Simon Kornblith, Kevin Swersky, Mohammad Norouzi, and Geoffrey E Hinton. Big self-supervised models are strong semi-supervised learners. Advances in neural information processing systems (NeurIPS), 33:22243-22255, 2020. 1", + "[11] Xiang Chen, Ningyu Zhang, Lei Li, Shumin Deng, Chuanqi Tan, Changliang Xu, Fei Huang, Luo Si, and Huajun Chen. Hybrid transformer with multi-level fusion for multimodal knowledge graph completion. In Proceedings of the International Conference on Research and Development in Information Retrieva (SIGIR), pages 904-915, 2022. 2", + "[12] Xi Chen, Josip Djolonga, Piotr Padlewski, Basil Mustafa, Soravit Changpinyo, Jialin Wu, Carlos Riquelme Ruiz, Sebastian Goodman, Xiao Wang, Yi Tay, et al. Pali-x: On scaling up a multilingual vision and language model. arXiv preprint arXiv:2305.18565, 2023. 3", + "[13] Xiang Chen, Jingtian Zhang, Xiaohan Wang, Ningyu Zhang, Tongtong Wu, Yuxiang Wang, Yongheng Wang, and Huajun Chen. Continual multimodal knowledge graph construction. In Proceedings of the Thirty-Third International Joint Conference on Artificial Intelligence, 2024. 3", + "[14] Xiaokang Chen, Zhiyu Wu, Xingchao Liu, Zizheng Pan, Wen Liu, Zhenda Xie, Xingkai Yu, and Chong Ruan. Januspro: Unified multimodal understanding and generation with data and model scaling. arXiv preprint arXiv:2501.17811, 2025. 1", + "[15] Yong Chen, Xinkai Ge, Shengli Yang, Linmei Hu, Jie Li, and Jinwen Zhang. A survey on multimodal knowledge graphs: Construction, completion and applications. Mathematics, 11 (8), 2023. 3", + "[16] Zhuo Chen, Yichi Zhang, Yin Fang, Yuxia Geng, Lingbing Guo, Xiang Chen, Qian Li, Wen Zhang, Jiaoyan Chen, Yushan Zhu, et al. Knowledge graphs meet multimodal learning: A comprehensive survey. arXiv preprint arXiv:2402.05391, 2024. 2", + "[17] Shiyao Cui, Jiangxia Cao, Xin Cong, Jiawei Sheng, Quanggang Li, Tingwen Liu, and Jinqiao Shi. Enhancing multimodal entity and relation extraction with variational information bottleneck. IEEE/ACM Transactions on Audio, Speech, and Language Processing, 32:1274-1285, 2024. 2", + "[18] Xiaohui Cui, Xiaolong Qu, Dongmei Li, Yu Yang, Yuxun Li, and Xiaoping Zhang. Mkgcn: Multi-modal knowledge graph convolutional network for music recommender systems. *Electronics*, 12(12), 2023. 
3", + "[19] Yihe Deng, Chenchen Ye, Zijie Huang, Mingyu Derek Ma, Yiwen Kou, and Wei Wang. Graphvis: Boosting llms with visual knowledge graph integration. In Advances in Neural Information Processing Systems, pages 67511-67534. Curran Associates, Inc., 2024. 6, 7", + "[20] Tausif Diwan, G. Anirudh, and Jitendra V. Tembhurne. Object detection using yolo: challenges, architectural successors, datasets and applications. Multimedia Tools Appl., 82 (6):9243-9275, 2022. 3", + "[21] Alexey Dosovitskiy. An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929, 2020. 4", + "[22] Darren Edge, Ha Trinh, Newman Cheng, Joshua Bradley, Alex Chao, Apurva Mody, Steven Truitt, Dasha Metropoli" + ], + "bbox": [ + 516, + 92, + 905, + 900 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 924, + 504, + 936 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "tansky, Robert Osazuwa Ness, and Jonathan Larson. From local to global: A graph rag approach to query-focused summarization. arXiv preprint arXiv:2404.16130, 2024. 5", + "[23] Quan Fang, Xiaowei Zhang, Jun Hu, Xian Wu, and Changsheng Xu. Contrastive multi-modal knowledge graph representation learning. IEEE Transactions on Knowledge and Data Engineering, 35(9):8983-8996, 2023. 3", + "[24] Duoduo Feng, Xiangteng He, and Yuxin Peng. Mkvse: Multimodal knowledge enhanced visual-semantic embedding for image-text retrieval. ACM Trans. Multimedia Comput. Commun. Appl., 19(5), 2023. 3", + "[25] Deepanway Ghosal, Navonil Majumder, Roy Ka-Wei Lee, Rada Mihalcea, and Soujanya Poria. Language guided visual question answering: Elevate your multimodal language model using knowledge-enriched prompts. arXiv preprint arXiv:2310.20159, 2023. 6, 7", + "[26] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025. 1, 6", + "[27] Yunfei Guo, Fei Yin, Xiao-hui Li, Xudong Yan, Tao Xue, Shuqi Mei, and Cheng-Lin Liu. Visual traffic knowledge graph generation from scene images. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 21604-21613, 2023. 3", + "[28] ZIRUI GUO, Lianghao Xia, Yanhua Yu, Tu Ao, and Chao Huang. LightRAG: Simple and fast retrieval-augmented generation, 2024. 2, 5, 6", + "[29] Lavdim Halilaj, Juergen Luettin, Sebastian Monka, Cory Henson, and Stefan Schmid. Knowledge graph-based integration of autonomous driving datasets. International Journal of Semantic Computing, 17(02):249-271, 2023. 3", + "[30] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2016. 4", + "[31] Yang Hu, Guihua Wen, Adriane Chapman, Pei Yang, Mingnan Luo, Yingxue Xu, Dan Dai, and Wendy Hall. Graph-based visual-semantic entanglement network for zero-shot image recognition. IEEE Transactions on Multimedia, 24: 2473-2487, 2022. 3", + "[32] Zhiqiang Hu, Lei Wang, Yihuai Lan, Wanyu Xu, Ee-Peng Lim, Lidong Bing, Xing Xu, Soujanya Poria, and Roy Lee. LLM-adapters: An adapter family for parameter-efficient fine-tuning of large language models. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 5254-5276, Singapore, 2023. 
Association for Computational Linguistics. 2", + "[33] Aaron Hurst, Adam Lerer, Adam P Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihinda, Alan Hayes, Alec Radford, et al. Gpt-4o system card. arXiv preprint arXiv:2410.21276, 2024. 1, 3, 6", + "[34] Justin Johnson, Ranjay Krishna, Michael Stark, Li-Jia Li, David Shamma, Michael Bernstein, and Li Fei-Fei. Image retrieval using scene graphs. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2015. 2" + ], + "bbox": [ + 91, + 90, + 482, + 900 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[35] Adam Tauman Kalai and Santosh S. Vempala. Calibrated language models must hallucinate. In Proceedings of the 56th Annual ACM Symposium on Theory of Computing, page 160–171, New York, NY, USA, 2024. Association for Computing Machinery. 1", + "[36] Amar Viswanathan Kannan, Dmitriy Fradkin, Ioannis Akrotirianakis, Tugba Kulahcioglu, Arquimedes Canedo, Aditi Roy, Shih-Yuan Yu, Malawade Arnav, and Mohammad Abdullah Al Faruque. Multimodal knowledge graph for deep learning papers and code. In Proceedings of the 29th ACM International Conference on Information & Knowledge Management, page 3417-3420, New York, NY, USA, 2020. Association for Computing Machinery. 3", + "[37] Jared Kaplan, Sam McCandlish, Tom Henighan, Tom B Brown, Benjamin Chess, Rewon Child, Scott Gray, Alec Radford, Jeffrey Wu, and Dario Amodei. Scaling laws for neural language models. arXiv preprint arXiv:2001.08361, 2020. 7", + "[38] Ranjay Krishna, Yuke Zhu, Oliver Groth, Justin Johnson, Kenji Hata, Joshua Kravitz, Stephanie Chen, Yannis Kalantidis, Li-Jia Li, David A Shamma, et al. Visual genome: Connecting language and vision using crowdsourced dense image annotations. International journal of computer vision, 123:32-73, 2017. 6, 7", + "[39] Junlin Lee, Yequan Wang, Jing Li, and Min Zhang. Multimodal reasoning with multimodal knowledge graph. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 10767-10782, Bangkok, Thailand, 2024. Association for Computational Linguistics. 2, 3, 1", + "[40] Junnan Li, Dongxu Li, Caiming Xiong, and Steven Hoi. BLIP: Bootstrapping language-image pre-training for unified vision-language understanding and generation. In Proceedings of the 39th International Conference on Machine Learning, pages 12888-12900. PMLR, 2022. 3", + "[41] Junnan Li, Dongxu Li, Silvio Savarese, and Steven C. H. Hoi. BLIP-2: bootstrapping language-image pre-training with frozen image encoders and large language models. In Proceedings of the International Conference on Machine Learning (ICML), pages 19730–19742, 2023. 1, 3, 4, 6, 2", + "[42] Xin Li, Dongze Lian, Zhihe Lu, Jiawang Bai, Zhibo Chen, and Xinchao Wang. Graphadapter: Tuning vision-language models with dual knowledge graph. In Advances in Neural Information Processing Systems, pages 13448-13466. Curran Associates, Inc., 2023. 3", + "[43] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. In Advances in Neural Information Processing Systems, pages 34892-34916. Curran Associates, Inc., 2023. 3, 6, 7", + "[44] Junming Liu, Yanting Gao, Siyuan Meng, Yifei Sun, Aoqi Wu, Yufei Jin, Yirong Chen, Ding Wang, and Guosun Zeng. Mosaic: Data-free knowledge distillation via mixture-of-experts for heterogeneous distributed environments. arXiv preprint arXiv:2505.19699, 2025. 
1", + "[45] Junming Liu, Guosun Zeng, Ding Wang, Yanting Gao, and Yufei Jin. Fedrecon: Missing modality reconstruction in distributed heterogeneous environments. arXiv preprint arXiv:2504.09941, 2025.3" + ], + "bbox": [ + 516, + 90, + 903, + 900 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 925, + 508, + 936 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[46] Ye Liu, Hui Li, Alberto Garcia-Duran, Mathias Niepert, Daniel Onoro-Rubio, and David S Rosenblum. Mmkg: multi-modal knowledge graphs. In The Semantic Web: 16th International Conference, ESWC 2019, Portoroz, Slovenia, June 2–6, 2019, Proceedings 16, pages 459–474. Springer, 2019. 2, 6, 7", + "[47] Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin transformer: Hierarchical vision transformer using shifted windows. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 10012-10022, 2021. 4", + "[48] Pan Lu, Swaroop Mishra, Tanglin Xia, Liang Qiu, Kai-Wei Chang, Song-Chun Zhu, Oyvind Tafjord, Peter Clark, and Ashwin Kalyan. Learn to explain: Multimodal reasoning via thought chains for science question answering. In Advances in Neural Information Processing Systems, pages 2507–2521. Curran Associates, Inc., 2022. 2, 5, 6, 7", + "[49] Pan Lu, Baolin Peng, Hao Cheng, Michel Galley, Kai-Wei Chang, Ying Nian Wu, Song-Chun Zhu, and Jianfeng Gao. Chameleon: Plug-and-play compositional reasoning with large language models. In Advances in Neural Information Processing Systems, pages 43447-43478. Curran Associates, Inc., 2023. 6, 7", + "[50] Gen Luo, Yiyi Zhou, Tianhe Ren, Shengxin Chen, Xiaoshuai Sun, and Rongrong Ji. Cheap and quick: Efficient vision-language instruction tuning for large language models. In Advances in Neural Information Processing Systems, pages 29615-29627. Curran Associates, Inc., 2023. 6", + "[51] Kenneth Marino, Mohammad Rastegari, Ali Farhadi, and Roozbeh Mottaghi. Ok-vqa: A visual question answering benchmark requiring external knowledge. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 3", + "[52] Runqing Miao, Qingxuan Jia, Fuchun Sun, Gang Chen, Haiming Huang, and Shengyi Miao. Semantic representation of robot manipulation with knowledge graph. Entropy, 25(4), 2023. 3", + "[53] Chancharik Mitra, Brandon Huang, Trevor Darrell, and Roei Herzig. Compositional chain-of-thought prompting for large multimodal models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 14420-14431, 2024. 6", + "[54] Aske Plaat, Annie Wong, Suzan Verberne, Joost Broekens, Niki van Stein, and Thomas Back. Reasoning with large language models, a survey. arXiv preprint arXiv:2407.11511, 2024. 1", + "[55] Bryan A. Plummer, Liwei Wang, Chris M. Cervantes, Juan C. Caicedo, Julia Hockenmaier, and Svetlana Lazebnik. Flickr30k entities: Collecting region-to-phrase correspondences for richer image-to-sentence models. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2015. 2, 1", + "[56] Shengsheng Qian, Jun Hu, Quan Fang, and Changsheng Xu. Knowledge-aware multi-modal adaptive graph convolutional networks for fake news detection. ACM Trans. Multimedia Comput. Commun. Appl., 17(3), 2021. 
3", + "[57] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry," + ], + "bbox": [ + 91, + 90, + 480, + 900 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision. In Proceedings of the 38th International Conference on Machine Learning, pages 8748-8763. PMLR, 2021. 3, 6", + "[58] Brian Reily, Christopher Reardon, and Hao Zhang. Representing multi-robot structure through multimodal graph embedding for the selection of robot teams. In 2020 IEEE International Conference on Robotics and Automation (ICRA), pages 5576–5582, 2020. 3", + "[59] Joshua Robinson, Christopher Michael Ryting, and David Wingate. Leveraging large language models for multiple choice question answering. In Proceedings of the International Conference on Learning Representations (ICLR), 2023. 1, 4", + "[60] Hrituraj Singh, Anshul Nasery, Denil Mehta, Aishwarya Agarwal, Jatin Lamba, and Balaji Vasan Srinivasan. MI-MOQA: Multimodal input multimodal output question answering. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 5317-5332, Online, 2021. Association for Computational Linguistics. 3", + "[61] Yaoxian Song, Penglei Sun, Haoyu Liu, Zhixu Li, Wei Song, Yanghua Xiao, and Xiaofang Zhou. Scene-driven multimodal knowledge graph construction for embodied ai. IEEE Transactions on Knowledge and Data Engineering, 36(11): 6962-6976, 2024. 2, 3", + "[62] Rui Sun, Xuezhi Cao, Yan Zhao, Junchen Wan, Kun Zhou, Fuzheng Zhang, Zhongyuan Wang, and Kai Zheng. Multimodal knowledge graphs for recommender systems. In Proceedings of the 29th ACM International Conference on Information & Knowledge Management, page 1405-1414, New York, NY, USA, 2020. Association for Computing Machinery. 3", + "[63] Yu Sun, Shuohuan Wang, Shikun Feng, Siyu Ding, Chao Pang, Junyuan Shang, Jiaxiang Liu, Xuyi Chen, Yanbin Zhao, Yuxiang Lu, et al. Ernie 3.0: Large-scale knowledge enhanced pre-training for language understanding and generation. arXiv preprint arXiv:2107.02137, 2021. 2", + "[64] Gemini Team, Rohan Anil, Sebastian Borgeaud, Jean-Baptiste Alayrac, Jiahui Yu, Radu Soricut, Johan Schalkwyk, Andrew M Dai, Anja Hauth, Katie Millican, et al. Gemini: a family of highly capable multimodal models. arXiv preprint arXiv:2312.11805, 2023. 3", + "[65] Shengbang Tong, Ellis L Brown II, Penghao Wu, Sanghyun Woo, ADITHYA JAIRAM IYER, Sai Charitha Akula, Shusheng Yang, Jihan Yang, Manoj Middepogu, Ziteng Wang, Xichen Pan, Rob Fergus, Yann LeCun, and Saining Xie. Cambrian-1: A fully open, vision-centric exploration of multimodal LLMs. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024. 1", + "[66] Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, et al. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288, 2023. 
1, 5", + "[67] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia" + ], + "bbox": [ + 516, + 90, + 903, + 900 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 490, + 924, + 506, + 936 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Polosukhin. Attention is all you need. In Advances in Neural Information Processing Systems. Curran Associates, Inc., 2017. 4", + "[68] Peng Wang, Qi Wu, Chunhua Shen, Anthony Dick, and Anton van den Hengel. Fvqa: Fact-based visual question answering. IEEE Transactions on Pattern Analysis and Machine Intelligence, 40(10):2413-2427, 2018. 3", + "[69] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, et al. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191, 2024. 3, 6", + "[70] Xin Wang, Benyuan Meng, Hong Chen, Yuan Meng, Ke Lv, and Wenwu Zhu. Tiva-kg: A multimodal knowledge graph with text, image, video and audio. In Proceedings of the 31st ACM International Conference on Multimedia, page 2391-2399, New York, NY, USA, 2023. Association for Computing Machinery. 3", + "[71] Yuequn Wang, Liyan Dong, Hao Zhang, Xintao Ma, Yongli Li, and Minghui Sun. An enhanced multi-modal recommendation based on alternate training with knowledge graph representation. IEEE Access, 8:213012-213026, 2020. 3", + "[72] Tao Wu, Mengze Li, Jingyuan Chen, Wei Ji, Wang Lin, Jinyang Gao, Kun Kuang, Zhou Zhao, and Fei Wu. Semantic alignment for multimodal large language models. In Proceedings of the 32nd ACM International Conference on Multimedia, page 3489-3498, New York, NY, USA, 2024. Association for Computing Machinery. 1", + "[73] Yike Wu, Nan Hu, Guilin Qi, Sheng Bi, Jie Ren, Anhuan Xie, and Wei Song. Retrieve-rewrite-answer: A kg-to-text enhanced llms framework for knowledge graph question answering. arXiv preprint arXiv:2309.11206, 2023. 2", + "[74] Ziyang Xiao, Dongxiang Zhang, Yangjun Wu, Lilin Xu, Yuan Jessica Wang, Xiongwei Han, Xiaojin Fu, Tao Zhong, Jia Zeng, Mingli Song, and Gang Chen. Chain-of-experts: When LLMs meet complex operations research problems. In The Twelfth International Conference on Learning Representations, 2024. 2, 3", + "[75] Dexuan Xu, Yanyuan Chen, Jieyi Wang, Yue Huang, Hanpin Wang, Zhi Jin, Hongxing Wang, Weihua Yue, Jing He, Hang Li, and Yu Huang. MLeVLM: Improve multi-level progressive capabilities based on multimodal large language model for medical visual question answering. In Findings of the Association for Computational Linguistics: ACL 2024, pages 4977-4997, Bangkok, Thailand, 2024. Association for Computational Linguistics. 1", + "[76] Peng Xu, Xiatian Zhu, and David A. Clifton. Multimodal learning with transformers: A survey. IEEE Transactions on Pattern Analysis and Machine Intelligence, 45(10):12113-12132, 2023. 3", + "[77] An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, et al. Qwen2. 5 technical report. arXiv preprint arXiv:2412.15115, 2024. 6", + "[78] Xiaocui Yang, Wenfang Wu, Shi Feng, Ming Wang, Daling Wang, Yang Li, Qi Sun, Yifei Zhang, Xiaoming Fu, and Soujanya Poria. 
Mm-bigbench: Evaluating multimodal models" + ], + "bbox": [ + 91, + 90, + 482, + 901 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "on multimodal content comprehension tasks. arXiv preprint arXiv:2310.09036, 2023. 7", + "[79] Qinghao Ye, Haiyang Xu, Guohai Xu, Jiabo Ye, Ming Yan, Yiyang Zhou, Junyang Wang, Anwen Hu, Pengcheng Shi, Yaya Shi, et al. mplug-owl: Modularization empowers large language models with multimodality. arXiv preprint arXiv:2304.14178, 2023. 3", + "[80] Shukang Yin, Chaoyou Fu, Sirui Zhao, Ke Li, Xing Sun, Tong Xu, and Enhong Chen. A survey on multimodal large language models. arXiv preprint arXiv:2306.13549, 2023. 1", + "[81] Jingtong Yue, Zhiwei Lin, Xin Lin, Xiaoyu Zhou, Xiangtai Li, Lu Qi, Yongtao Wang, and Ming-Hsuan Yang. RobuR-CDet: Enhancing robustness of radar-camera fusion in bird's eye view for 3d object detection. In The Thirteenth International Conference on Learning Representations, 2025. 3", + "[82] Yichi Zhang, Zhuo Chen, Lingbing Guo, Yajing Xu, Binbin Hu, Ziqi Liu, Huajun Chen, and Wen Zhang. Mygo: Discrete modality information as fine-grained tokens for multi-modal knowledge graph completion. CoRR, abs/2404.09468, 2024. 2", + "[83] Zhuosheng Zhang, Aston Zhang, Mu Li, hai zhao, George Karypis, and Alex Smola. Multimodal chain-of-thought reasoning in language models. Transactions on Machine Learning Research, 2024. 3", + "[84] Jiabao Zhao, Xin Lin, Jie Zhou, Jing Yang, Liang He, and Zhaohui Yang. Knowledge-based fine-grained classification for few-shot learning. In 2020 IEEE International Conference on Multimedia and Expo (ICME), pages 1-6, 2020. 3", + "[85] Changmeng Zheng, Junhao Feng, Ze Fu, Yi Cai, Qing Li, and Tao Wang. Multimodal relation extraction with efficient graph alignment. In Proceedings of the 29th ACM International Conference on Multimedia, page 5298-5306, New York, NY, USA, 2021. Association for Computing Machinery. 2", + "[86] Ge Zheng, Bin Yang, Jiajin Tang, Hong-Yu Zhou, and Sibei Yang. Ddcot: Duty-distinct chain-of-thought prompting for multimodal reasoning in language models. In Advances in Neural Information Processing Systems, pages 5168-5191. Curran Associates, Inc., 2023. 3, 6, 7", + "[87] Juncheng Zheng, Meiyu Liang, Yang Yu, Yawen Li, and Zhe Xue. Knowledge graph enhanced multimodal transformer for image-text retrieval. In 2024 IEEE 40th International Conference on Data Engineering (ICDE), pages 70-82, 2024. 3", + "[88] Deyao Zhu, Jun Chen, Kilichbek Haydarov, Xiaogian Shen, Wenxuan Zhang, and Mohamed Elhoseiny. Chatgpt asks, blip-2 answers: Automatic questioning towards enriched visual descriptions. arXiv preprint arXiv:2303.06594, 2023. 4", + "[89] Deyao Zhu, Jun Chen, Xiaogian Shen, Xiang Li, and Mohamed Elhoseiny. MiniGPT-4: Enhancing vision-language understanding with advanced large language models. In The Twelfth International Conference on Learning Representations, 2024. 3", + "[90] Xiangru Zhu, Zhixu Li, Xiaodan Wang, Xueyao Jiang, Penglei Sun, Xuwu Wang, Yanghua Xiao, and Nicholas Jing Yuan. Multi-modal knowledge graph construction and ap" + ], + "bbox": [ + 516, + 92, + 903, + 901 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 490, + 924, + 508, + 936 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "plication: A survey. IEEE Transactions on Knowledge and Data Engineering, 36(2):715-735, 2024. 2", + "[91] Zhengxia Zou, Keyan Chen, Zhenwei Shi, Yuhong Guo, and Jieping Ye. 
Object detection in 20 years: A survey. Proceedings of the IEEE, 111(3):257-276, 2023. 3" + ], + "bbox": [ + 91, + 90, + 483, + 162 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 490, + 924, + 508, + 936 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Aligning Vision to Language: Annotation-Free Multimodal Knowledge Graph Construction for Enhanced LLMs Reasoning", + "text_level": 1, + "bbox": [ + 106, + 85, + 890, + 131 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Supplementary Material", + "bbox": [ + 380, + 141, + 614, + 162 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/a3b9193854505da3f69bfbf1b89c3b890690e66f382f92dbb2246b00c8c73cc4.jpg", + "image_caption": [ + "Figure 5. (a) The limited information contained in text-based KGs leads to inaccurate responses. (b) Leveraging MMKGs enables reasoning with enriched multimodal information to produce the correct answer." + ], + "image_footnote": [], + "bbox": [ + 94, + 178, + 478, + 406 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "A. Cross-Modal Reasoning Failures in Textual KGs", + "text_level": 1, + "bbox": [ + 89, + 491, + 483, + 525 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Multimodal learning, by virtue of its capability to synergistically integrate heterogeneous data modalities, establishes a comprehensive knowledge acquisition paradigm that significantly enhances reasoning robustness [39]. This principle extends to Multimodal Knowledge Graphs (MMKGs), where the semantic symbiosis between visual and textual modalities addresses the critical limitation of modal isolation inherent in conventional text-based KGs. As empirically demonstrated in Figure 5, pure textual KGs often induce hallucinated or incomplete responses due to their inability to resolve visual-textual semantic ambiguities. For instance, when queried about fine-grained visual attributes (e.g., spatial relationships or object properties absent in textual metadata), LLMs grounded solely on textual KG triples frequently generate plausible but factually inconsistent answers, as they lack access to cross-modal referential grounding. In contrast, MMKGs bridge this gap through bidirectional visual-textual entity linking, enabling LLMs to retrieve and reason over fused evidence from both modalities. Our qualitative analysis of the case in Figure 5 reveals that the multimodal reasoning path—leveraging both image-derived entities and textual relationships—is essential for deriving logically coherent and factually accurate conclusions.", + "bbox": [ + 89, + 537, + 483, + 900 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/4482265fe190048fcacd251960a888f5a863aba009211a7e298bd63dc9539739.jpg", + "image_caption": [ + "Figure 6. Three example social media posts with labelled named entities [8]." + ], + "image_footnote": [], + "bbox": [ + 514, + 178, + 903, + 327 + ], + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/70c509381366e918387c50f30aab692b2a319db199d55fff823b7322e85e7c0d.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Type | #Chains | Mentions/Chain | Boxes/Chain
people | 59766 | 3.17 | 1.95
clothing | 42380 | 1.76 | 1.44
body parts | 12809 | 1.50 | 1.42
animals | 5086 | 3.63 | 1.44
vehicles | 5561 | 2.77 | 1.21
instruments | 1827 | 2.85 | 1.61
scene | 46919 | 2.03 | 0.62
other | 82098 | 1.94 | 1.04
total | 244035 | 2.10 | 1.13
", + "bbox": [ + 517, + 369, + 913, + 535 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Table 6. Coreference chain statistics of Flickr30K-Entity. The number of mentions per chain indicates how salient an entity is. The number of boxes per chain indicates how many distinct entities it refers to.", + "bbox": [ + 511, + 545, + 906, + 599 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "B. Case Studies on Manual Annotation Overheads", + "text_level": 1, + "bbox": [ + 511, + 628, + 903, + 662 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "The development of robust entity extraction models typically hinges on large-scale annotated corpora, yet the generalizability of these models remains intrinsically bounded by the semantic scope and granularity of their training datasets. Widely-adopted benchmarks such as Flickr30K-Entity [55] exemplify this constraint: while serving as de facto standards for evaluating visual-linguistic entity grounding, their construction necessitates labor-intensive manual annotations at scale. As illustrated in Figure 6, even high-quality annotations in such datasets often adopt a minimalist tagging paradigm—identifying only coarse-grained entities while neglecting fine-grained attributes and contextual relationships. This sparsity of semantic enrichment directly propagates to trained models, which consequently fail to capture the compositional semantics necessary for com", + "bbox": [ + 509, + 674, + 906, + 900 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 924, + 503, + 935 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/b3d612618801180dba35a284f64b34eea05de762b170c942e03ec9dd5a4b8bdd.jpg", + "image_caption": [ + "Figure 7. An example from the ScienceQA benchmark [48], illustrating multimodal question-answering scenarios that necessitate joint reasoning over textual prompts and visual evidence." + ], + "image_footnote": [], + "bbox": [ + 93, + 88, + 480, + 258 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "plex reasoning scenarios.", + "bbox": [ + 89, + 324, + 261, + 339 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "C. Case Studies on Visual Specificity Deficits in VLM-Generated Captions", + "text_level": 1, + "bbox": [ + 89, + 351, + 482, + 386 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "As exemplified in Figure 7, vision-language models like BLIP-2 [41] tend to produce oversimplified textual descriptions that critically lack actionable visual-semantic signals. The VLM-generated caption (\"A map of the united states with the location of the united states\") merely identifies coarse-grained scene semantics, failing to capture object-level attributes (color coding of regions), spatial relationships (border adjacency between Arizona and Mexico) and compositional context (compass orientation in lower-right corner). In contrast, human annotations (\"This is a map of the United States. The main part of the country is shown in green, with several states labeled. Arizona is in the southwestern part of the US, bordering Mexico. Oklahoma is in the central - southern region. Louisiana is located along the Gulf of Mexico in the southeastern part. West Virginia is in the eastern part of the country. 
There's also a compass in the bottom - right corner to show directions.\") demonstrate essential characteristics for multimodal reasoning.", + "bbox": [ + 89, + 393, + 483, + 667 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "D. Retrieval Strategy in MMKG Construction", + "text_level": 1, + "bbox": [ + 89, + 678, + 480, + 696 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "We adopt retrieval strategies based on the framework provided by LightRAG [28], which supports multiple modes:", + "bbox": [ + 89, + 704, + 482, + 734 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- local: focuses on context-dependent information;", + "- global: utilizes global knowledge;", + "- hybrid: combines local and global retrieval methods;", + "- naive: performs basic search without advanced techniques;", + "- mix: integrates knowledge graph and vector retrieval;" + ], + "bbox": [ + 89, + 734, + 480, + 824 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "In our implementation, we rely on the hybrid retrieval mode, which balances the precision of local cues with the breadth of global knowledge. This strategy improves the relevance and completeness of retrieved information, which is crucial for high-quality MMKG construction.", + "bbox": [ + 89, + 825, + 482, + 900 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Algorithm 1 MMKG Generation", + "text_level": 1, + "bbox": [ + 514, + 90, + 736, + 104 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Require: $\\hat{S}$ (refined description), $T$ (external knowledge, optional)", + "bbox": [ + 513, + 112, + 903, + 143 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Ensure: $\\mathcal{G} = (\\mathcal{E},\\mathcal{R})$ (knowledge graph)", + "bbox": [ + 514, + 143, + 785, + 157 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1: $\\mathcal{T}\\gets \\hat{S}\\oplus T$ $\\triangleright$ Concatenate $\\hat{S}$ and $T$", + "2: $\\mathcal{G} \\leftarrow$ LightRAG(T) $\\triangleright$ Generate graph via LightRAG", + "3: $(\\mathcal{E},\\mathcal{R})\\gets f_{\\mathrm{ERE}}(\\mathcal{T})$ Extract entities and relations", + "4: return $\\mathcal{G} = \\{(h,r,t)\\mid h,t\\in \\mathcal{E},r\\in \\mathcal{R}\\}$" + ], + "bbox": [ + 524, + 157, + 903, + 219 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "LightRAG is an excellent project that effectively supports automatic MMKG construction, and its retrieval design plays a central role in our framework. Specifically, LightRAG introduces keyword-guided text chunking to expand the retrievable context. By leveraging both high-level and low-level keywords in combination with chunk-level vector retrieval, it enables more comprehensive knowledge access. In addition, the choice of the retrieval model is also important. Larger LLMs have slower retrieval speeds but better performance. In this experiment, we used Qwen2.5-7B for retrieval. We also tested the retrieval performance of 32B and 72B models, which showed a $1\\% - 5\\%$ improvement in performance, but it also significantly increased the graph construction time. Therefore, we finally adopted a lightweight retrieval model. The details of the entire LightRAG are shown in Algorithm 1.", + "bbox": [ + 511, + 250, + 906, + 491 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "E. 
Selection of Sensitivity Threshold $\\tau$", + "text_level": 1, + "bbox": [ + 511, + 503, + 834, + 522 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "We select the sensitivity threshold $\\tau$ empirically based on performance on the validation set. In practice, $\\tau$ can be approximately determined by observing the token length distribution of captions: datasets with richer visual content and longer captions tend to benefit from a lower $\\tau$ , while simpler datasets can tolerate a higher $\\tau$ . This provides a practical way to adjust $\\tau$ without extensive tuning.", + "bbox": [ + 511, + 529, + 905, + 635 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "In addition, we notice a key pattern when analyzing the relevance scores across windows. Around certain values of $\\tau$ , the scores tend to cluster tightly on both sides of the threshold. As a result, even a small change in $\\tau$ near these points can lead to a large change in the number of tokens being pruned. This indicates that the pruning process is especially sensitive around those points, and adjusting $\\tau$ even slightly may have a big impact on the final token budget.", + "bbox": [ + 511, + 635, + 905, + 757 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "F. Construction Cost and Scalability", + "text_level": 1, + "bbox": [ + 511, + 768, + 823, + 787 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Construction cost is a complex issue, which we analyze from the perspectives of time and hardware requirements. Time-wise, the main components are CoE and LightRAG. While using APIs can significantly speed up the process, offline deployment and inference are also feasible. For example, generating descriptions with Qwen2-VL-7B achieves around 60 tokens per second, processing one image ev", + "bbox": [ + 511, + 794, + 905, + 901 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 924, + 504, + 936 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "ery 4 seconds. Thus, processing 1k images takes approximately 1.21 hours. Constructing a KG with Qwen2.5-7B yields about 196k tokens per hour, leading to a total of 1.33 hours for 1k images. The intermediate pruning step, accelerated by CLIP's fast processing speed, is negligible. Overall, the cost is much lower than manual annotation or fine-tuning LLMs, making the method applicable to largescale datasets. For resource-constrained users, deploying a lightweight VLM with CoE is comparable to or even more efficient than deploying a powerful VLM, further demonstrating the scalability of our approach.", + "bbox": [ + 93, + 90, + 480, + 256 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "G. Discussion on VLM Usage and Design Flexibility", + "text_level": 1, + "bbox": [ + 94, + 268, + 480, + 301 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Our observations on the number and type of VLMs used in CoE are consistent with the original conclusions drawn in the CoE paper [74]. Regardless of the specific VLM architecture, increasing the number of models $N$ consistently improves performance up to a saturation point, after which further scaling yields diminishing returns. Moreover, we find that convergence is achieved more quickly when using lower softmax temperatures or simpler datasets. 
These factors reduce the ambiguity in model disagreement, allowing consensus to form more rapidly among the ensemble.", + "bbox": [ + 93, + 311, + 480, + 460 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Interestingly, our results also show that using a single, strong VLM can achieve performance comparable to a cascade of smaller, lightweight models. This suggests a practical trade-off between model strength and ensemble size—while ensembling helps in reaching consensus across diverse weak learners, a single high-capacity model may suffice in many scenarios, especially when computational resources are limited.", + "bbox": [ + 93, + 463, + 480, + 582 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "In the original CoE method, the outputs from all VLM experts are first aggregated together, and then a selection process determines which expert descriptions to use. To save time in constructing the MMKGs with LLMs, we instead adopted a sequential strategy where the output of one expert is used as the prompt input for the next. We also evaluated the original aggregation and selection strategy on a smaller-scale dataset and found it to perform well, sometimes even surpassing the sequential approach. This confirms that CoE's original design of aggregating all experts' outputs before selecting which descriptions to use is effective and remains a strong baseline. However, correspondingly, using LLMs to construct MMKGs based on these aggregated descriptions requires significantly more time.", + "bbox": [ + 93, + 584, + 480, + 794 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Additionally, while we apply pruning only at the final description step, pruning during intermediate steps may also yield good results depending on the dataset and task. There is no fixed rule for when or how to apply pruning, and our framework is designed to be flexible enough to accommodate different strategies. We emphasize that both our CoE framework and the SV step are intended to be adaptable, al", + "bbox": [ + 94, + 796, + 480, + 898 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "lowing users to experiment freely and select the approach that best suits their needs.", + "bbox": [ + 517, + 92, + 903, + 119 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "There are various VLMs that can be used for pruning. Among them, we recommend CLIP due to its fast inference speed and pruning performance comparable to other VLMs. 
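The sequential expert-chaining strategy adopted in this appendix can be sketched as a small driver loop in which each expert receives the image together with its predecessor's description, mirroring Eq. (2) of the main text. This is a minimal, hypothetical skeleton: the stub callables stand in for real VLM wrappers (e.g., BLIP-2, LLaVA, Qwen2-VL), and the file name is illustrative.

```python
# Hypothetical sketch of the sequential CoE strategy described above: each
# expert sees the image plus the previous expert's description (Eq. 2).
from typing import Callable, List

Expert = Callable[[str, str], str]  # (image_path, prior_description) -> description

def run_sequential_coe(image_path: str, experts: List[Expert], steps: int = 1) -> str:
    description = ""                       # S_0 is initialized empty
    for _ in range(steps):                 # C iteration steps
        for expert in experts:             # N cascaded experts
            description = expert(image_path, description)
    return description

# Stub experts standing in for pretrained VLMs:
blip2 = lambda img, prev: (prev + " A flooded street after a storm.").strip()
llava = lambda img, prev: (prev + " Several people wade past submerged cars.").strip()

print(run_sequential_coe("example.jpg", [blip2, llava]))
```

In practice each callable would prompt its VLM with the prior description, which is what distinguishes this sequential variant from the aggregate-then-select strategy of the original CoE paper discussed above.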
Given its efficiency and effectiveness, CLIP serves as a practical choice for pruning in many scenarios.", + "bbox": [ + 517, + 122, + 903, + 196 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 925, + 503, + 935 + ], + "page_idx": 15 + } +] \ No newline at end of file diff --git a/data/2025/2503_12xxx/2503.12972/66f6c6f6-89fc-4fc3-8936-54da38b8d574_model.json b/data/2025/2503_12xxx/2503.12972/66f6c6f6-89fc-4fc3-8936-54da38b8d574_model.json new file mode 100644 index 0000000000000000000000000000000000000000..ffb0ee125f3bb46e257fa7adaeff087988fab655 --- /dev/null +++ b/data/2025/2503_12xxx/2503.12972/66f6c6f6-89fc-4fc3-8936-54da38b8d574_model.json @@ -0,0 +1,3345 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.275, + 0.058, + 0.725 + ], + "angle": 270, + "content": "arXiv:2503.12972v3 [cs.CV] 21 Nov 2025" + }, + { + "type": "title", + "bbox": [ + 0.107, + 0.13, + 0.892, + 0.177 + ], + "angle": 0, + "content": "Aligning Vision to Language: Annotation-Free Multimodal Knowledge Graph Construction for Enhanced LLMs Reasoning" + }, + { + "type": "text", + "bbox": [ + 0.205, + 0.203, + 0.792, + 0.222 + ], + "angle": 0, + "content": "Junming Liu\\(^{1,2}\\), Siyuan Meng\\(^{2,3}\\), Yanting Gao\\(^{1}\\), Song Mao\\(^{2}\\), Pinlong Cai\\(^{2}\\)," + }, + { + "type": "text", + "bbox": [ + 0.223, + 0.222, + 0.775, + 0.24 + ], + "angle": 0, + "content": "Guohang Yan\\(^{2}\\), Yirong Chen\\(^{2,4}\\), Zilin Bian\\(^{5}\\), Ding Wang\\(^{2*}\\), Botian Shi\\(^{2}\\)" + }, + { + "type": "text", + "bbox": [ + 0.244, + 0.24, + 0.756, + 0.258 + ], + "angle": 0, + "content": "\\(^{1}\\)Tongji University \\(^{2}\\)Shanghai Artificial Intelligence Laboratory" + }, + { + "type": "text", + "bbox": [ + 0.189, + 0.257, + 0.81, + 0.275 + ], + "angle": 0, + "content": "\\(^{3}\\)East China Normal University \\(^{4}\\)Stanford University \\(^{5}\\)New York University" + }, + { + "type": "text", + "bbox": [ + 0.266, + 0.278, + 0.722, + 0.293 + ], + "angle": 0, + "content": "liu_junming6917@tongji.edu.cn wangding@pjlab.org.cn" + }, + { + "type": "title", + "bbox": [ + 0.248, + 0.327, + 0.327, + 0.343 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.358, + 0.485, + 0.766 + ], + "angle": 0, + "content": "Multimodal reasoning in Large Language Models (LLMs) struggles with incomplete knowledge and hallucination artifacts, challenges that textual Knowledge Graphs (KGs) only partially mitigate due to their modality isolation. While Multimodal Knowledge Graphs (MMKGs) promise enhanced cross-modal understanding, their practical construction is impeded by semantic narrowness of manual text annotations and inherent noise in visual-semantic entity linkages. In this paper, we propose Vision-align-to-Language integrated Knowledge Graph (VaLiK), a novel approach for constructing MMKGs that enhances LLMs reasoning through cross-modal information supplementation. Specifically, we cascade pre-trained Vision-Language Models (VLMs) to align image features with text, transforming them into descriptions that encapsulate image-specific information. Furthermore, we developed a cross-modal similarity verification mechanism to quantify semantic consistency, effectively filtering out noise introduced during feature alignment. Even without manually annotated image captions, the refined descriptions alone suffice to construct the MMKG. 
Compared to conventional MMKGs construction paradigms, our approach achieves substantial storage efficiency gains while maintaining direct entity-to-image linkage capability. Experimental results on multimodal reasoning tasks demonstrate that LLMs augmented with VaLiK outperform previous state-of-the-art models. Our code is published at https://github.com/Wings-Of-Disaster/VaLiK." + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.794, + 0.222, + 0.809 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.819, + 0.484, + 0.88 + ], + "angle": 0, + "content": "Recent advancements in Large Language Models (LLMs) [2, 10, 26, 66] have demonstrated their superiority and versatility across various Natural Language Reasoning (NLR) tasks [9, 44, 54, 59]. To enhance LLMs into the" + }, + { + "type": "page_footnote", + "bbox": [ + 0.11, + 0.888, + 0.236, + 0.9 + ], + "angle": 0, + "content": "*Corresponding author." + }, + { + "type": "image", + "bbox": [ + 0.518, + 0.328, + 0.902, + 0.686 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.512, + 0.699, + 0.907, + 0.755 + ], + "angle": 0, + "content": "Figure 1. (a) Training entity extraction models relies on extensive fine-grained annotations, increasing labeling costs. More examples are provided in Appendix B. (b) Capturing implicit semantic associations demands abstract comprehension or logical inference." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.78, + 0.907, + 0.901 + ], + "angle": 0, + "content": "realm of multimodal reasoning, researchers [65, 72, 75, 80] have endeavored to equip these models with multimodal capabilities, as evidenced by advancements in Multimodal Large Language Models (MLLMs) such as BLIP-2 [41], GPT-4o [33], Janus-Pro [14], among others. Despite their notable progress, these models often experience hallucinations [5, 35], primarily arising from knowledge deficiencies due to incomplete or obsolete information." + }, + { + "type": "page_number", + "bbox": [ + 0.495, + 0.925, + 0.504, + 0.936 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.09, + 0.092, + 0.482, + 0.212 + ], + "angle": 0, + "content": "Fine-tuning LLMs demands prohibitive computational costs [32]. While text-based Knowledge Graphs (KGs) have partially addressed this limitation by efficient real-time updates [6, 63, 73], they are still restricted by modal isolation, which hinders cross-modal reasoning, as detailed in Appendix A. To bridge this semantic fragmentation, Multimodal Knowledge Graphs (MMKGs) have been developed as unified representational frameworks [11, 34, 39, 46]." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.215, + 0.482, + 0.352 + ], + "angle": 0, + "content": "However, constructing robust MMKGs faces two primary obstacles [16, 90]. First, the lack of large-scale fine-grained entity-image corpora makes it infeasible to train high-quality entity extractors, significantly constraining scalability, as illustrated in Figure 1a. Second, conventional visual relation detectors primarily identify superficial spatial interactions instead of semantic relations consistent with KGs, while frequently hallucinating implausible connections that corrupt graph integrity, as shown in Figure 1b." 
+ }, + { + "type": "text", + "bbox": [ + 0.09, + 0.355, + 0.483, + 0.793 + ], + "angle": 0, + "content": "In this paper, we propose VaLiK, short for Vision-align-to-Language integrated Knowledge Graph, a novel framework designed to empower LLMs with advanced multimodal reasoning. Unlike traditional methods that rely on text annotations for training extraction models and the knowledge construction process [55], VaLiK adopts an annotation-free approach to MMKGs construction. Specifically, we first employ several pretrained Vision-Language Models (VLMs), designed based on Chain-of-Experts (CoE) principles [74], to convert visual inputs into image-specific textual descriptions through cross-modal feature alignment. This procedure eliminates the need for manually annotated image captions in both the knowledge extraction and construction phases while preserving visual details typically missing in generic text descriptions. Moreover, in contrast to existing relation detection methods that require predefined label taxonomies [17, 61, 82, 85], VaLiK excels at extracting profound semantic relationships that are both KG-compatible and capture novel associations beyond training supervision. While VLMs enable cross-modal reasoning and interpretation, they introduce spurious relational noise through hallucinated inter-modal attributions, as depicted in Figure 2. We address this limitation through cross-modal similarity recalibration, strategically filtering inconsistent information while preserving valid semantic correspondences. Finally, the purified descriptions are systematically organized into MMKGs via LLM-driven symbolic structuring [28], bridging visual and textual domains with factual consistency." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.796, + 0.482, + 0.901 + ], + "angle": 0, + "content": "To thoroughly evaluate the VaLiK method, we conduct a comprehensive assessment across two critical multimodal benchmarks: multimodal classification (tested on the CrisisMMD dataset [3]) and multimodal question answering (evaluated via the ScienceQA benchmark [48]). The experiments span diverse LLM architectures and MMKG construction techniques to ensure the framework's robustness." + }, + { + "type": "image", + "bbox": [ + 0.518, + 0.093, + 0.903, + 0.263 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.513, + 0.275, + 0.905, + 0.304 + ], + "angle": 0, + "content": "Figure 2. Feature-aligned descriptions from VLMs introduce redundant and inaccurate relationship patterns." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.318, + 0.905, + 0.409 + ], + "angle": 0, + "content": "The experimental results demonstrate that the MMKGs constructed by VaLiK achieve superior multimodal reasoning performance in LLMs while requiring substantially less storage than conventional approaches. More importantly, the proposed approach retains direct entity-to-image linkage capabilities even with the compressed graph structure." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.41, + 0.905, + 0.47 + ], + "angle": 0, + "content": "In summary, VaLiK is the first framework that enables end-to-end, annotation-free, zero-shot, and storage-efficient multimodal knowledge construction with high adaptability and scalability. 
Our key contributions include:" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.472, + 0.904, + 0.562 + ], + "angle": 0, + "content": "- To the best of our knowledge, VaLiK is the first end-to-end framework to build Annotation-Free MMKGs to improve LLMs' multimodal reasoning capabilities, effectively eliminating the need for manually annotated textual material and enabling a completely autonomous multimodal knowledge generation process." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.563, + 0.905, + 0.653 + ], + "angle": 0, + "content": "- We offer an innovative zero-shot method for constructing MMKGs that captures deep semantic connections beyond traditional predetermined labels with an effective verification system that guarantees the accuracy of these relationships. The knowledge distillation paradigm greatly decreases storage while maintaining semantic integrity." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.653, + 0.905, + 0.729 + ], + "angle": 0, + "content": "- We develop a highly modular and extensible architecture that allows VaLiK to effortlessly incorporate new models and workflows for specialized domain tasks, facilitating rapid adaptation to diverse application scenarios without incurring expensive system changes." + }, + { + "type": "list", + "bbox": [ + 0.514, + 0.472, + 0.905, + 0.729 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.746, + 0.655, + 0.761 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.772, + 0.794, + 0.789 + ], + "angle": 0, + "content": "2.1. Multimodal Knowledge Graphs" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.795, + 0.905, + 0.901 + ], + "angle": 0, + "content": "The principal advantage of MMKGs resides in their multimodal integration beyond conventional KGs. By linking entities with corresponding visual or textual data, MMKGs introduce valuable visual and textual information to the knowledge base, substantially advancing multimodal reasoning capabilities. This combination addresses core challenges in tasks that inherently demand multimodal synergy" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.504, + 0.936 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.09, + 0.091, + 0.482, + 0.38 + ], + "angle": 0, + "content": "like autonomous driving [27, 29], image-text retrieval [24, 87] and robotic manipulation [52, 58]. However, constructing trustworthy MMKGs with minimal manual effort remains a critical challenge. Recent studies have proposed innovative strategies to enhance MMKG reliability and utility. For instance, Chen et al. [13] proposed MSPT, a framework addressing continual MMKG construction through gradient modulation for balanced multimodal learning and attention distillation to mitigate catastrophic forgetting. Song et al. [61] developed Scene-MMKG, integrating knowledge engineering with large language models to improve robotic manipulation by resolving data sparsity and knowledge uncertainty. Wang et al. [70] introduced TIVA-KG, the first quad-modal knowledge graph spanning text, image, video, and audio with triplet grounding, empirically validating its effectiveness in downstream tasks. While these advances enhance multimodal reasoning capabilities, their efficacy remains rooted in resource-intensive paradigms, requiring extensively annotated datasets for knowledge acquisition."
+ }, + { + "type": "title", + "bbox": [ + 0.091, + 0.39, + 0.479, + 0.407 + ], + "angle": 0, + "content": "2.2. Knowledge-Augmented Multimodal Learning" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.413, + 0.483, + 0.732 + ], + "angle": 0, + "content": "Multimodal learning has seen significant progress in aligning and integrating information across different data modalities [7, 45, 76]. The incorporation of structured knowledge through MMKGs further enhances these approaches, improving the reasoning capabilities and generalization across a variety of domains, such as visual question answering [51, 60, 68], recommendation systems [18, 62, 71], and classification [31, 56, 84]. Methods like GraphAdapter's dual-KG adaptation [42] and contrastive multi-relational encoding with KGs [23] inject external knowledge into models, refining their performance and improving their capability to handle complex tasks. Additionally, Lee et al. [39] proposed MR-MKG, a novel framework that constructs task-specific MMKGs to enhance multimodal reasoning in LLMs. These knowledge-augmented paradigms demonstrate superior cross-modal semantic grounding compared to unimodal approaches [15, 36]. However, their reliance on preconstructed MMKGs often leads to domain discrepancies, where generic knowledge schemas misalign with task-specific reasoning patterns, ultimately limiting contextual precision in target applications." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.742, + 0.411, + 0.759 + ], + "angle": 0, + "content": "2.3. Multimodal Large Language Models" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.765, + 0.483, + 0.903 + ], + "angle": 0, + "content": "The limitations of text-only LLMs in meeting increasingly complex demands have spurred extensive research [79, 83, 86] into developing LLMs capable of effectively processing and reasoning over multimodal inputs. Current research predominantly employs adapter or projection layers to connect the embedding spaces of various modality-specific encoders with the textual embedding space of LLMs [39]. For instance, foundational models like CLIP [57] and BLIP [40] pioneered cross-modal alignment by jointly training vision" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.092, + 0.907, + 0.32 + ], + "angle": 0, + "content": "and text encoders to map images and text into a shared embedding space. Building on this, LLaVA [43] and Flamingo [4] advanced the field by integrating visual encoders with LLMs, enabling more nuanced multimodal understanding and generation. More recently, Gemini [64], Qwen2-VL [69] and GPT-4o [33] have further pushed the boundaries by scaling up multimodal pretraining and introducing sophisticated mechanisms for cross-modal interaction. However, multimodal LLMs remain prone to hallucinations. While they enhance cross-modal alignment, they neither acquire new knowledge nor avoid introducing noise through integration. To address these limitations, VaLiK \"uses the master's tools to refine the master's craft,\" first constructing MMKGs via MLLMs and then leveraging them to enhance MLLMs' reasoning capabilities." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.333, + 0.605, + 0.348 + ], + "angle": 0, + "content": "3. Method" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.358, + 0.907, + 0.541 + ], + "angle": 0, + "content": "In this section, we present the technical details of VaLiK. VaLiK introduces a novel expansion-reduction paradigm for visual knowledge extraction. 
The architecture initially organizes several VLMs with distinct knowledge domains, designed based on CoE principles [74], to produce comprehensive textual descriptions encompassing hierarchical visual details. A cross-modal similarity verification mechanism then iteratively filters out noisy tokens through cross-modal alignment while preserving semantically salient elements. This optimization-style approach eliminates external textual dependencies while enabling effective MMKG construction. VaLiK's framework is shown in Figure 3." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.55, + 0.866, + 0.567 + ], + "angle": 0, + "content": "3.1. CoE-based Visual to Language Modeling" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.572, + 0.906, + 0.692 + ], + "angle": 0, + "content": "Recent entity detection techniques [20, 81, 91] have been widely adopted for entity and relation extraction in MMKG construction. However, these methods are inherently limited by predefined categorical boundaries, lacking the capacity to recognize visual concepts outside their training vocabulary. In contrast, VLMs pretrained on web-scale corpora [12, 41, 89] exhibit broader recognition capabilities through exposure to diverse visual concepts." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.693, + 0.906, + 0.769 + ], + "angle": 0, + "content": "We therefore leverage pretrained VLMs to extract comprehensive visual information. This process removes the necessity for detailed fine-grained data typically required to train specialized recognition models. The generalized vision-to-language conversion pipeline can be formalized as:" + }, + { + "type": "equation", + "bbox": [ + 0.627, + 0.779, + 0.907, + 0.805 + ], + "angle": 0, + "content": "\\[\nS = \\mathcal{D}_{\\text{text}}\\left(\\mathcal{A}\\left(\\mathcal{E}_{\\text{vis}}(I)\\right)\\right), \\tag{1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.81, + 0.906, + 0.902 + ], + "angle": 0, + "content": "where \\(I\\) denotes the input image, \\(\\mathcal{E}_{\\mathrm{vis}}\\) denotes the visual encoder extracting visual features, \\(\\mathcal{A}\\) carries out cross-modal feature alignment and interaction, and \\(\\mathcal{D}_{\\mathrm{text}}\\) generates textual tokens through autoregressive decoding. The resulting visual description \\(S = \\{w_{1},\\dots,w_{n}\\}\\) emerges from this multi-stage processing." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.505, + 0.936 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.094, + 0.09, + 0.905, + 0.301 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.089, + 0.309, + 0.908, + 0.355 + ], + "angle": 0, + "content": "Figure 3. The pipeline of VaLiK: First, large-scale visual descriptions are generated using CoE-based VLMs. Then, a similarity verification mechanism is used to prune irrelevant information. Finally, MMKGs are constructed using LLMs based on LightRAG. The constructed MMKGs can assist LLMs in multimodal reasoning, alleviating the hallucination issues caused by incomplete knowledge." + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.364, + 0.484, + 0.515 + ], + "angle": 0, + "content": "However, quantitative analysis uncovers considerable discrepancies between machine-generated and human-annotated descriptions [88]. 
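As a concrete reference point for Eq. (1), the following minimal sketch runs a single pretrained captioner through the Hugging Face transformers API. The BLIP-2 checkpoint and the generation length are illustrative assumptions, not fixed choices of the framework; any pretrained VLM with a text decoder fits the same shape.

```python
# A minimal instantiation of Eq. (1): encode the image (E_vis), align
# features internally (A), and decode a description S (D_text). The
# checkpoint and max_new_tokens are assumptions for illustration.
import torch
from PIL import Image
from transformers import Blip2Processor, Blip2ForConditionalGeneration

processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-opt-2.7b").eval()

def describe(image_path: str) -> str:
    image = Image.open(image_path).convert("RGB")
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        out = model.generate(**inputs, max_new_tokens=60)  # autoregressive decoding
    return processor.batch_decode(out, skip_special_tokens=True)[0].strip()

print(describe("example.jpg"))
```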
As an illustration, while utilizing BLIP-2 [41] to generate sample captions, we noted that the model outputs are markedly concise and devoid of visual specifics, as detailed in Appendix C. To bridge this gap, we implement CoE-enhanced generation through cascaded VLM processing. At iteration step \\( t \\), each expert \\( E_{i} \\) receives both the original visual signals \\( I \\) and the contextual output from the preceding expert \\( E_{i - 1} \\):" + }, + { + "type": "equation", + "bbox": [ + 0.206, + 0.527, + 0.483, + 0.555 + ], + "angle": 0, + "content": "\\[\n\\mathcal{S}_{i}^{(t)} = E_{i}\\left(I, \\mathcal{S}_{i - 1}^{(t - 1)}\\right), \\tag{2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.566, + 0.483, + 0.601 + ], + "angle": 0, + "content": "where \\(S_{i - 1}^{(t - 1)}\\) denotes the description from expert \\(E_{i - 1}\\) at step \\(t - 1\\), with \\(S_0^{(t)}\\coloneqq \\emptyset\\) for initialization." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.602, + 0.483, + 0.634 + ], + "angle": 0, + "content": "Specifically, each expert \\( E_{i} \\) implements a unified visual-language processing task:" + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.636, + 0.303, + 0.65 + ], + "angle": 0, + "content": "1. Visual Feature Extraction:" + }, + { + "type": "equation", + "bbox": [ + 0.205, + 0.662, + 0.483, + 0.681 + ], + "angle": 0, + "content": "\\[\n\\mathbf{V}_{i} = \\operatorname{Enc}_{\\text{vis}}^{i}(I) \\in \\mathbb{R}^{d_{v} \\times N_{p}}, \\tag{3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.693, + 0.483, + 0.724 + ], + "angle": 0, + "content": "where \\(\\mathsf{Enc}_{\\mathrm{vis}}^i\\) denotes an established visual encoder [21, 30, 47] producing \\(N_{p}\\) patch embeddings with dimension \\(d_v\\)." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.726, + 0.408, + 0.739 + ], + "angle": 0, + "content": "2. Cross-Modal Interaction and Generation:" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.74, + 0.483, + 0.787 + ], + "angle": 0, + "content": "VLMs integrate pretrained learnable query embeddings \\(\\mathbf{Q}_i\\in \\mathbb{R}^{d_q\\times L_q}\\) to interact with visual features \\(\\mathbf{V}_i\\in\\) \\(\\mathbb{R}^{d_v\\times N_p}\\) via cross-attention [67]:" + }, + { + "type": "equation", + "bbox": [ + 0.136, + 0.797, + 0.483, + 0.859 + ], + "angle": 0, + "content": "\\[\n\\mathbf{H}_{i} = \\operatorname{CrossAttn}\\left(\\mathbf{Q}_{i}, \\mathbf{V}_{i}\\right) = \\operatorname{softmax}\\left(\\frac{\\mathbf{Q}_{i}\\mathbf{W}_{q}^{i}\\left(\\mathbf{V}_{i}\\mathbf{W}_{k}^{i}\\right)^{\\top}}{\\sqrt{d_{k}}}\\right)\\mathbf{V}_{i}\\mathbf{W}_{v}^{i}, \\tag{4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.869, + 0.484, + 0.902 + ], + "angle": 0, + "content": "where \\(\\mathbf{W}_q^i\\in \\mathbb{R}^{d_q\\times d_k}\\), \\(\\mathbf{W}_k^i\\), \\(\\mathbf{W}_v^i\\in \\mathbb{R}^{d_v\\times d_k}\\), and \\(L_{q}\\) denotes the predefined query length. Cross-attention serves" + }, + { + "type": "text", + "bbox": [ + 0.532, + 0.364, + 0.905, + 0.41 + ], + "angle": 0, + "content": "as a prevalent approach, while other interaction strategies coexist [4]. The adopted VLMs in our implementation primarily rely on this approach for modality fusion." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.412, + 0.659, + 0.425 + ], + "angle": 0, + "content": "3. 
Text Generation:" + }, + { + "type": "text", + "bbox": [ + 0.532, + 0.425, + 0.905, + 0.461 + ], + "angle": 0, + "content": "The text encoder \\(\\mathsf{Enc}_{\\mathrm{text}}^{i}\\) first processes the preceding expert's output \\(S_{i - 1}^{(t - 1)}\\) into latent features:" + }, + { + "type": "equation", + "bbox": [ + 0.613, + 0.473, + 0.905, + 0.493 + ], + "angle": 0, + "content": "\\[\n\\mathbf{P}_{i} = \\operatorname{Enc}_{\\text{text}}^{i}\\left(S_{i - 1}^{(t - 1)}\\right) \\in \\mathbb{R}^{d_{t} \\times L}. \\tag{5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.533, + 0.507, + 0.905, + 0.541 + ], + "angle": 0, + "content": "Subsequently, the text decoder \\(\\mathrm{Dec}_{\\mathrm{text}}^{i}\\) synthesizes the final output \\(S_{i}^{(t)}\\) by jointly conditioning on \\(\\mathbf{P}_i\\) and \\(\\mathbf{H}_i\\):" + }, + { + "type": "equation", + "bbox": [ + 0.555, + 0.565, + 0.905, + 0.587 + ], + "angle": 0, + "content": "\\[\n\\mathcal{S}_{i}^{(t)} = \\operatorname{Dec}_{\\text{text}}^{i}\\left(\\mathbf{P}_{i}, \\mathbf{H}_{i}\\right) = \\left\\{w_{1}^{(t, i)}, \\dots, w_{m}^{(t, i)}\\right\\}. \\tag{6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.598, + 0.905, + 0.631 + ], + "angle": 0, + "content": "Ultimately, the final textual description \\( S_N^{(C)} \\) is obtained after \\( C \\) iteration steps through \\( N \\) cascaded experts." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.641, + 0.825, + 0.657 + ], + "angle": 0, + "content": "3.2. Cross-Modal Similarity Verification" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.663, + 0.907, + 0.77 + ], + "angle": 0, + "content": "To address noise in VLM-generated captions, we design a sliding window mechanism with semantic consistency verification. This method ensures that only relevant and semantically consistent segments are retained in the final description. Let \\( W_{k} \\) denote the \\( k \\)-th window containing \\( m \\) consecutive tokens \\( \\{w_{km + 1},\\dots ,w_{(k + 1)m}\\} \\). For each window, we compute its cross-modal similarity score:" + }, + { + "type": "equation", + "bbox": [ + 0.596, + 0.78, + 0.905, + 0.815 + ], + "angle": 0, + "content": "\\[\n\\alpha_{k} = \\frac{\\operatorname{Enc}_{\\text{vis}}(I) \\cdot \\operatorname{Enc}_{\\text{text}}\\left(W_{k}\\right)}{\\|\\operatorname{Enc}_{\\text{vis}}(I)\\| \\, \\|\\operatorname{Enc}_{\\text{text}}\\left(W_{k}\\right)\\|}, \\tag{7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.825, + 0.907, + 0.9 + ], + "angle": 0, + "content": "where \\(\\mathsf{Enc}_{\\mathrm{vis/text}}(\\cdot)\\) adopts a lightweight pair of frozen CLIP [57] image and text encoders for efficient processing. The similarity score \\(\\alpha_{k}\\) lies within the range [0, 1], with higher values indicating a stronger alignment between the visual and textual information." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.505, + 0.936 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.09, + 0.092, + 0.485, + 0.273 + ], + "angle": 0, + "content": "After calculating the cross-modal similarity for each window, we employ an empirical threshold \\(\\tau\\) to filter out low-similarity windows. This threshold helps to identify and discard noisy or irrelevant sections of the generated caption that do not align well with the visual content, thereby reducing the impact of inaccurate or misleading descriptions. 
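A hedged sketch of this window-level verification, formalized in Eq. (8) just below: each window is scored against the image with a frozen CLIP and kept only if its score clears the threshold. Sentence-sized windows and the default tau = 0.25 (the CrisisMMD setting reported in Section 4.1) are assumptions, not the exact implementation.

```python
# Sketch of Eqs. (7)-(8): score each caption window against the image with
# a frozen CLIP and drop windows whose similarity falls below tau.
import torch
from PIL import Image
from transformers import CLIPModel, CLIPProcessor

# ViT-L/14 matches the pruning model named in Sec. 4.1.
model = CLIPModel.from_pretrained("openai/clip-vit-large-patch14").eval()
processor = CLIPProcessor.from_pretrained("openai/clip-vit-large-patch14")

def verify_and_prune(image: Image.Image, caption: str, tau: float = 0.25) -> str:
    # Sentence-sized windows W_k (an assumption mirroring the dynamic
    # sentence-based partitioning described in the implementation details).
    windows = [w.strip() for w in caption.split(".") if w.strip()]
    inputs = processor(text=windows, images=image,
                       return_tensors="pt", padding=True, truncation=True)
    with torch.no_grad():
        img = model.get_image_features(pixel_values=inputs["pixel_values"])
        txt = model.get_text_features(input_ids=inputs["input_ids"],
                                      attention_mask=inputs["attention_mask"])
    img = img / img.norm(dim=-1, keepdim=True)   # normalize so the dot product
    txt = txt / txt.norm(dim=-1, keepdim=True)   # below is cosine similarity
    alphas = (txt @ img.T).squeeze(-1)           # one alpha_k per window (Eq. 7)
    kept = [w for w, a in zip(windows, alphas.tolist()) if a >= tau]
    return ". ".join(kept) + ("." if kept else "")
```

Using tau = 0.20 instead would approximate the ScienceQA setting; Appendix E discusses how sensitive the pruned-token count is to this choice.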
Formally, for each window \\(W_{k}\\), if \\(\\alpha_{k} < \\tau\\), the window is discarded as noise. This process effectively prunes windows with low similarity scores, ensuring that only semantically meaningful segments remain. The final denoised description \\(\\hat{S}\\) is obtained by concatenating all windows \\(W_{k}\\) for which \\(\\alpha_{k} \\geq \\tau\\):" + }, + { + "type": "equation", + "bbox": [ + 0.235, + 0.28, + 0.484, + 0.314 + ], + "angle": 0, + "content": "\\[\n\\hat{S} = \\bigcup_{\\alpha_{k} \\geq \\tau} W_{k}. \\tag{8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.321, + 0.484, + 0.352 + ], + "angle": 0, + "content": "Our window size \\( m \\) is flexibly determined and generally adapts dynamically to natural sentence segmentation." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.359, + 0.484, + 0.375 + ], + "angle": 0, + "content": "3.3. MMKG Construction for Enhanced Reasoning" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.381, + 0.484, + 0.517 + ], + "angle": 0, + "content": "LLMs have become increasingly popular for identifying entities, relationships, and attributes within a corpus, which are then organized into a KG. The strength of LLM-based KG generation lies in its capacity to leverage the vast amount of knowledge encoded within these models, allowing them to detect complex and nuanced patterns across diverse data sources. This approach eliminates the need for manual annotation, enabling a highly scalable and domain-adaptive process suitable for a wide range of applications." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.518, + 0.484, + 0.623 + ], + "angle": 0, + "content": "We begin by refining the generated textual description \\(\\hat{S}\\) (VLM-based information), which is then optionally concatenated with any available external textual knowledge \\(T\\) to form the input for KG generation. This combined input is used to generate MMKGs with the help of an LLM [22, 28], leveraging its capacity for multi-hop reasoning and dynamic knowledge integration." + }, + { + "type": "equation", + "bbox": [ + 0.214, + 0.633, + 0.483, + 0.658 + ], + "angle": 0, + "content": "\\[\n\\mathcal{G} = \\operatorname{LLM}(\\hat{S} \\oplus T), \\tag{9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.661, + 0.483, + 0.707 + ], + "angle": 0, + "content": "where \\(\\oplus\\) denotes optional concatenation based on the availability of \\(T\\). The resulting graph \\(\\mathcal{G}\\) captures both visual and textual relationships inferred by the LLM." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.707, + 0.319, + 0.722 + ], + "angle": 0, + "content": "We define \\(\\mathcal{G}\\) as a set of triplets:" + }, + { + "type": "equation", + "bbox": [ + 0.175, + 0.731, + 0.483, + 0.748 + ], + "angle": 0, + "content": "\\[\n\\mathcal{G} = \\{(h, r, t) \\mid h, t \\in \\mathcal{E}, r \\in \\mathcal{R}\\}, \\tag{10}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.755, + 0.484, + 0.831 + ], + "angle": 0, + "content": "where \\(\\mathcal{E}\\) and \\(\\mathcal{R}\\) denote the sets of entities and relations. Entities include objects or concepts from the image or external text, while relations describe connections such as \"is a type of,\" \"part of,\" or \"has property.\" Each triplet \\((h,r,t)\\) links a head entity \\(h\\) and a tail entity \\(t\\) via relation \\(r\\)." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.832, + 0.484, + 0.877 + ], + "angle": 0, + "content": "Multimodal Reasoning Enhancement. 
To support multimodal reasoning, we retrieve relevant triplets from \\(\\mathcal{G}\\) through structural patterns during LLM inference:" + }, + { + "type": "equation", + "bbox": [ + 0.216, + 0.885, + 0.483, + 0.903 + ], + "angle": 0, + "content": "\\[\n\\mathcal{G}_{q} = \\operatorname{Retrieve}(q, \\mathcal{G}), \\tag{11}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.092, + 0.905, + 0.137 + ], + "angle": 0, + "content": "where \\( \\text{Retrieve}(\\cdot) \\) denotes a retrieval strategy that identifies subgraphs relevant to the query for reasoning. Detailed retrieval strategies are described in Appendix D." + }, + { + "type": "text", + "bbox": [ + 0.534, + 0.138, + 0.905, + 0.153 + ], + "angle": 0, + "content": "The augmented prompt integrates multimodal evidence:" + }, + { + "type": "equation", + "bbox": [ + 0.591, + 0.166, + 0.905, + 0.201 + ], + "angle": 0, + "content": "\\[\np_{\\mathrm{aug}} = q \\,\\big\\|\\, \\Big(\\bigcup_{(h, r, t) \\in \\mathcal{G}_{q}} [h] \\rightarrow r \\rightarrow [t]\\Big). \\tag{12}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.207, + 0.907, + 0.314 + ], + "angle": 0, + "content": "Note that we incorporate the storage locations of images in the database during MMKGs construction, enabling the MMKGs to link to visual data. VaLiK enables text-only LLMs to perform multimodal reasoning through \\(\\mathcal{G}\\)'s visual associations, while VLMs refresh knowledge representations by jointly injecting both visual and textual information, significantly mitigating hallucination risks." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.328, + 0.638, + 0.344 + ], + "angle": 0, + "content": "4. Experiment" + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.353, + 0.602, + 0.37 + ], + "angle": 0, + "content": "4.1. Setups" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.376, + 0.905, + 0.406 + ], + "angle": 0, + "content": "Evaluation Datasets. We evaluate VaLiK on two multimodal reasoning benchmarks with distinct characteristics:" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.407, + 0.905, + 0.527 + ], + "angle": 0, + "content": "- CrisisMMD [3]. This real-world disaster response dataset includes around 35,000 noisy social media postings with paired images and text, each annotated for seven catastrophe categories and four severity levels. Its realistic user-generated content with natural noise and implicit modality correlations provides a rigorous testbed for zero-shot adaptation, with good performance indicating practical relevance in real-world crisis scenarios." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.528, + 0.906, + 0.648 + ], + "angle": 0, + "content": "- ScienceQA [48]. This dataset contains 21,208 multimodal science questions combining textual and visual contexts, with \\(48.7\\%\\) of instances containing images. Questions span physics, chemistry, and biology domains, requiring cross-modal reasoning between textual concepts and visual diagrams. Additionally, ScienceQA offers image captions to aid text-only LLMs in reasoning, allowing a comparison of unimodal approaches." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.649, + 0.907, + 0.8 + ], + "angle": 0, + "content": "Task Formulation. For CrisisMMD, we define three multimodal classification tasks1: (1) binary information relevance filtering, (2) fine-grained humanitarian category recognition, and (3) a consolidated taxonomy with merged categories to reduce label complexity. 
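Returning briefly to the augmentation step of Eq. (12) above: it amounts to linearizing retrieved triplets as [h] → r → [t] evidence lines attached to the query. A minimal sketch follows, with the retrieval call of Eq. (11) (detailed in Appendix D) replaced by a hard-coded triplet list; the example triplets and prompt wording are assumptions.

```python
# Sketch of Eq. (12): augment a query with retrieved triplets rendered as
# "[h] -> r -> [t]" lines. The triplets are hard-coded here; in VaLiK they
# come from retrieval over the constructed MMKG (Eq. 11).
from typing import Iterable, Tuple

Triplet = Tuple[str, str, str]  # (head, relation, tail)

def augment_prompt(query: str, triplets: Iterable[Triplet]) -> str:
    evidence = "\n".join(f"[{h}] -> {r} -> [{t}]" for h, r, t in triplets)
    return f"{query}\n\nRetrieved knowledge:\n{evidence}"

example = [("flood", "is a type of", "natural disaster"),
           ("rescue boat", "part of", "disaster response")]
print(augment_prompt("What event does the image depict?", example))
```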
We omit the unimodal damage assessment to focus on multimodal aspects. For ScienceQA, we follow the original evaluation using multiple metrics: question types, contextual modalities, and educational stages. Performance is assessed through accuracy percentage across these categories." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.8, + 0.905, + 0.845 + ], + "angle": 0, + "content": "Baselines. We conduct a comprehensive evaluation of text-only LLMs, multimodal VLMs, and KGs that enhance LLMs in multimodal reasoning." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.846, + 0.905, + 0.877 + ], + "angle": 0, + "content": "- For CrisisMMD, we compare text-only LLMs using few-shot prompting (LLaMA-2 [66], GPT-4 [2]," + }, + { + "type": "page_footnote", + "bbox": [ + 0.531, + 0.887, + 0.905, + 0.901 + ], + "angle": 0, + "content": "This setting references the repository GitHub and Abavisani et al. [1]" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.504, + 0.936 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.095, + 0.089, + 0.905, + 0.183 + ], + "angle": 0, + "content": "
Task | Text-only LLMs | KG-Enhanced LLMs
LLaMA-2 | GPT-4 | DeepSeek-R1 | Qwen2.5 | LightRAG | VaLiK
7B | 13B | 70B | - | 7B | 8B | 32B | 70B | 7B | 32B | 72B | Text-only | Image-only | Text-Image
Task 1 | 62.32 | 63.80 | 63.15 | 66.83 | 67.23 | 63.31 | 63.61 | 65.53 | 65.04 | 67.28 | 67.95 | 67.49 | 69.52 | 68.90
Task 2 | 18.32 | 21.82 | 28.87 | 47.25 | 26.53 | 25.49 | 24.77 | 21.05 | 44.52 | 46.94 | 50.51 | 45.11 | 49.54 | 50.02
Task 2 Merged | 21.45 | 33.15 | 36.89 | 49.44 | 25.85 | 23.56 | 21.55 | 25.57 | 45.33 | 47.07 | 50.29 | 45.94 | 49.07 | 50.69
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.192, + 0.908, + 0.234 + ], + "angle": 0, + "content": "Table 1. The performance evaluation of text-only LLMs using few-shot prompting without any fine-tuning on the training set. As these models handle text only, test data is formatted as unimodal text for compatibility. In our implementations, both LightRAG and VaLiK adopt Qwen2.5-7B as the base reasoning model. Bold indicates the highest value, and underline indicates the second highest." + }, + { + "type": "table", + "bbox": [ + 0.095, + 0.247, + 0.92, + 0.351 + ], + "angle": 0, + "content": "
Task | Multimodal VLMs | KG-Enhanced LLMs
CLIP | LLaVA | BLIP-2 | GPT-4o | Qwen2-VL | VaLiK
ViT-L/14 | 7B | 13B | 34B | Flan-T5-XL | OPT | - | 2B-I | 7B-I | 72B-I | * | # | + | ~
Task 1 | 43.36 | 54.00 | 60.58 | 56.44 | 61.29 | 38.62 | 68.20 | 47.56 | 62.45 | 65.80 | 60.78 | 68.44 | 61.11 | 68.89
Task 2 | 17.88 | 28.01 | 20.14 | 25.15 | 40.86 | 14.26 | 47.58 | 7.60 | 32.68 | 47.21 | 25.80 | 48.88 | 27.23 | 49.78
Task 2-M | 20.79 | 30.61 | 23.44 | 25.07 | 40.72 | 14.27 | 49.55 | 7.42 | 34.20 | 48.28 | 27.31 | 49.27 | 29.09 | 49.31
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.361, + 0.908, + 0.403 + ], + "angle": 0, + "content": "Table 2. The performance of multimodal VLMs and KG-enhanced LLMs. The -I suffix denotes instruction-tuned variants. Symbol markers denote KG types and models: the asterisk (*) represents image-only KG with LLaVA-34B, hash (#) indicates image-only KG using Qwen2-VL-72B-I, plus (+) denotes text-image KG with LLaVA-34B, and tilde (\\*) shows text-image KG using Qwen2-VL-72B-I." + }, + { + "type": "text", + "bbox": [ + 0.103, + 0.429, + 0.482, + 0.473 + ], + "angle": 0, + "content": "DeepSeek-R1 [26], Qwen-2.5 [77]) and multimodal VLMs (CLIP [57], LLaVA [43], GPT-4o [33], Qwen2-VL [69], BLIP-2 [41])." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.475, + 0.483, + 0.58 + ], + "angle": 0, + "content": "- For ScienceQA, we compare models for general domains in zero/few-shot settings, including text-only LLMs (GPT Model [48], CoT [48], DDCoT [86]), multimodal VLMs (LG-VQA [25], LaVIN [50], BLIP-2, CCOT [53], GraphVis [19]) and Tool-LLM Chameleon [49]. These models are not specifically fine-tuned for scientific tasks, ensuring a fair evaluation of generalization capabilities." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.581, + 0.483, + 0.641 + ], + "angle": 0, + "content": "- We further compare the multimodal reasoning performance of LLMs assisted by KGs, evaluating text-based KGs built with LightRAG [28], and pre-constructed MMKGs such as Visual Genome [38] and Mmkg [46]." + }, + { + "type": "list", + "bbox": [ + 0.091, + 0.475, + 0.483, + 0.641 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.644, + 0.483, + 0.901 + ], + "angle": 0, + "content": "Implementation. For MMKG construction, we design a chain of VLMs including BLIP-2, LLaVA, and Qwen2-VL, with the CLIP-ViT-L/14 for pruning. Stronger or additional VLMs could be employed to enhance performance if more computational resources are available. We use the entire training set as the knowledge base and construct MMKGs from the extracted descriptions based on the LightRAG framework. In comparative experiments, the LightRAG method we evaluate utilizes only textual data, while VaLiK employs two configurations: (1) fully image-generated text descriptions (Image-only), and (2) original text combined with image-generated text (Text-Image). Dynamic window partitioning based on sentence length ensures syntactically coherent pruning results. Similarity thresholds are set to \\(\\tau = 0.25\\) for CrisisMMD and \\(\\tau = 0.20\\) for ScienceQA based on empirical evaluations to balance precision and recall. See Appendix E for selection details. We construct the" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.429, + 0.907, + 0.505 + ], + "angle": 0, + "content": "graph using DeepSeek-R1-70B and implement LightRAG's hybrid retrieval approach with Qwen2.5-7B. For graph construction and multimodal reasoning, we utilize \\(1 \\times\\) NVIDIA A100-80GB GPUs. Task-specific prompts are designed to assist LLMs in multimodal reasoning evaluation." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.515, + 0.655, + 0.53 + ], + "angle": 0, + "content": "4.2. Main Results" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.538, + 0.907, + 0.825 + ], + "angle": 0, + "content": "Multimodal Classification Tasks. We conduct multimodal classification experiments on the CrisisMMD dataset, evaluating both text-only LLMs and multimodal VLMs. Detailed comparative results are provided in Tables 1 and 2. 
For text-only LLMs, we adopt Qwen2.5-7B as the foundational reasoning model. Remarkably, the VaLiK-enhanced version achieves state-of-the-art (SOTA) performance matching that of the native Qwen2.5-72B model. The image-only KG constructed through VaLiK demonstrates an average accuracy improvement of \\(4.41\\%\\) across tasks, with the text-image variant attaining a \\(4.90\\%\\) enhancement. These improvements significantly surpass the \\(1.22\\%\\) gain obtained by LightRAG using textual KG. We further validate VaLiK's cross-scale applicability through evaluations on Qwen2.5-32B and 72B architectures, observing consistent \\(2.0\\% - 2.5\\%\\) improvements. While not as significant as the 7B model's benefits, this shows that models that have substantial prior knowledge benefit less from external knowledge augmentation" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.825, + 0.906, + 0.901 + ], + "angle": 0, + "content": "Unlike text-only LLMs that depend on MMKGs for visual understanding, VLMs primarily benefit from KGs integration through outdated knowledge refreshment. Due to the inherent availability of visual features during inference, VaLiK's performance gains for VLMs remain con" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.926, + 0.505, + 0.937 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.095, + 0.089, + 0.904, + 0.428 + ], + "angle": 0, + "content": "
Method | #T-Param | Subject | Context Modality | Grade | Average
NAT | SOC | LAN | TXT | IMG | NO | G1-6 | G7-12
Human [48] | - | 90.23 | 84.97 | 87.48 | 89.60 | 87.50 | 88.10 | 91.59 | 82.42 | 88.40
GPT-4 [43] | - | 84.06 | 73.45 | 87.36 | 81.87 | 70.75 | 90.73 | 84.69 | 79.10 | 82.69
CoT (GPT-3) [48] | 173B | 75.44 | 70.87 | 78.09 | 74.68 | 67.43 | 79.93 | 78.23 | 69.68 | 75.17
CoT (UnifiedQA) [48] | 223M | 71.00 | 76.04 | 78.91 | 66.42 | 66.53 | 81.81 | 77.06 | 68.82 | 74.11
CoT (GPT-4) [49] | 1T+ | 85.48 | 72.44 | 90.27 | 82.65 | 71.49 | 92.89 | 86.66 | 79.04 | 83.99
DDCoT [86] | 175B | 80.15 | 76.72 | 82.82 | 78.89 | 72.53 | 85.02 | 82.86 | 75.21 | 80.15
Chameleon (ChatGPT) [49] | 175B+ | 81.62 | 70.64 | 84.00 | 79.77 | 70.80 | 86.62 | 81.86 | 76.53 | 79.93
LG-VQA (BLIP-2) [25] | - | - | - | - | - | - | - | - | - | 86.32
LaVIN-13B [78] | - | - | - | - | - | - | - | - | - | 77.54
BLIP-2 [78] | - | - | - | - | - | - | - | - | - | 74.17
CCOT | 7B | - | - | - | - | - | - | - | - | 76.84
GraphVis [19] | 7B | - | - | - | - | - | - | - | - | 73.18
Qwen2.5-7B | 7B | 76.20 | 67.83 | 77.27 | 74.49 | 65.79 | 79.02 | 77.72 | 69.35 | 74.72
Qwen2.5-72B | 72B | 79.64 | 67.10 | 84.90 | 77.56 | 65.00 | 87.93 | 80.25 | 74.85 | 78.37
Qwen2.5-7B (Mmkg) [46] | 7B | 73.98 | 66.37 | 78.18 | 71.65 | 64.30 | 79.65 | 76.51 | 68.03 | 73.47
Qwen2.5-7B (Visual Genome) [38] | 7B | 76.78 | 67.04 | 78.09 | 74.05 | 66.19 | 79.72 | 78.08 | 69.68 | 75.08
Qwen2.5-7B (VaLiK Text-only) | 7B | 84.54 | 74.24 | 86.91 | 82.74 | 72.53 | 90.03 | 84.51 | 80.28 | 82.98
Qwen2.5-7B (VaLiK Image-only) | 7B | 79.14 | 71.54 | 79.27 | 77.16 | 69.72 | 83.14 | 80.65 | 73.96 | 78.88
Qwen2.5-7B (VaLiK Text-Image) | 7B | 84.15 | 75.14 | 87.64 | 82.99 | 73.18 | 89.69 | 84.40 | 80.95 | 83.16
Qwen2.5-72B (VaLiK Text-Image) | 72B | 85.61 | 75.93 | 90.27 | 84.40 | 74.17 | 92.33 | 85.79 | 82.98 | 84.77
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.438, + 0.907, + 0.496 + ], + "angle": 0, + "content": "Table 3. Performance comparison (\\%) on ScienceQA benchmark. #T-Params denotes trainable parameters. Categories: NAT (natural science), SOC (social science), LAN (language), TXT (text context), IMG-Cap (image caption), NO (no context), G1-6 (grades 1-6), G7-12 (grades 7-12). Method groups: (1) Human performance baseline, (2) Zero/Few-shot text-only LLMs, (3) Zero/Few-shot Multimodal VLMs, (4) LLMs enhanced with knowledge graphs for multimodal reasoning." + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.521, + 0.482, + 0.732 + ], + "angle": 0, + "content": "strained compared to text-only counterparts. We separately applied VaLiK enhancement to Qwen2-VL-72B-Instruct and LLaVA-34B, obtaining distinct improvements: LLaVA-34B achieves accuracy gains of \\(2.41\\%\\) (image-only KG) and \\(3.59\\%\\) (text-image KG), while Qwen2-VL-72B-Instruct shows \\(1.77\\%\\) and \\(2.23\\%\\) improvements respectively under identical configurations. These experimental findings collectively demonstrate that VaLiK effectively extracts valuable signals from the training corpus and enables dynamic knowledge injection into VLMs during inference, thereby substantially alleviating hallucination phenomena. The differential improvements between Qwen2-VL-72B-Instruct and LLaVA-34B further validate the framework's adaptability across model architectures." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.733, + 0.483, + 0.884 + ], + "angle": 0, + "content": "Additionally, we analyze the results of LLMs without KG enhancement in the tables, which generally follow the scaling law [37]. However, DeepSeek-R1 shows anomalous behavior. Through testing, we find that its reasoning process may introduce complex information that interferes with its judgment. Furthermore, empirical results show that most baseline models achieve suboptimal performance without fine-tuning. In contrast, VaLiK's automated MMKG construction framework requires no task-specific adaptation yet delivers consistent improvements." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.886, + 0.482, + 0.901 + ], + "angle": 0, + "content": "Multimodal Question Answering Tasks. We evalu" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.521, + 0.905, + 0.672 + ], + "angle": 0, + "content": "ated multimodal QA performance on the ScienceQA benchmark with Qwen2.5-7B and Qwen2.5-72B as base architectures, augmented by four knowledge sources: Mmkg, Visual Genome, text-only LightRAG and VaLiK. Compared to existing zero-shot/few-shot LLMs that are not specifically optimized for scientific QA, our VaLiK-enhanced Qwen2.5-72B achieved SOTA performance on \\(62.5\\%\\) of subtasks, demonstrating particular strengths in multimodal reasoning scenarios requiring cross-modal alignment, with an average accuracy gain of \\(6.4\\%\\) over baseline models." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.674, + 0.907, + 0.87 + ], + "angle": 0, + "content": "Our study identifies a fundamental imbalance between textual and visual knowledge representations in ScienceQA. Text-only KGs (14k entities, 18k relations) exhibit \\(8 \\times\\) denser structured knowledge than image-only counterparts (3k concepts, 1k relations), explaining visual modality underperformance. Despite this gap, vision-KG-augmented Qwen2.5-7B still attains \\(4.16\\%\\) accuracy gains over its non-enhanced version. 
Notably, our MMKG requires only 489MB of storage in total, while the scene graph component2 of Visual Genome alone occupies 739MB. This lightweight construction enables effective reasoning using only textual KG descriptions without raw images in resource-constrained scenarios." + }, + { + "type": "page_footnote", + "bbox": [ + 0.532, + 0.887, + 0.625, + 0.9 + ], + "angle": 0, + "content": "2Visual Genome" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.505, + 0.936 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.095, + 0.089, + 0.907, + 0.192 + ], + "angle": 0, + "content": "
Type | Method | #T-Param | Subject | Context Modality | Grade | Average
NAT | SOC | LAN | TXT | IMG | NO | G1-6 | G7-12
Image-Only | Qwen2.5-7B | 7B | 76.20 | 67.83 | 77.27 | 74.49 | 65.79 | 79.02 | 77.72 | 69.35 | 74.72
+ CVs | 7B | 80.06 (↑3.86) | 70.30 (↑2.47) | 80.55 (↑3.28) | 78.05 (↑3.56) | 68.43 (↑2.64) | 83.76 (↑4.74) | 81.17 (↑3.45) | 72.71 (↑3.36) | 78.14 (↑3.42)
+ SV | 7B | 79.14 (↓0.92) | 71.54 (↑1.24) | 79.27 (↓1.28) | 77.16 (↓0.89) | 69.72 (↑1.29) | 83.14 (↓0.62) | 80.65 (↓0.52) | 73.96 (↑1.25) | 78.88 (↑0.74)
Text-Image | Qwen2.5-7B | 7B | 76.20 | 67.83 | 77.27 | 74.49 | 65.79 | 79.02 | 77.72 | 69.35 | 74.72
+ CVs | 7B | 81.88 (↑5.68) | 73.00 (↑5.17) | 84.00 (↑6.73) | 80.55 (↑6.06) | 70.05 (↑4.26) | 87.11 (↑8.09) | 82.01 (↑4.29) | 77.98 (↑8.63) | 80.57 (↑5.85)
+ SV | 7B | 84.15 (↑2.27) | 75.14 (↑2.14) | 87.64 (↑3.64) | 82.99 (↑2.44) | 73.18 (↑3.13) | 89.69 (↑2.58) | 84.40 (↑2.39) | 80.95 (↑2.97) | 83.16 (↑2.59)
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.21, + 0.907, + 0.255 + ], + "angle": 0, + "content": "Table 4. Ablation study on ScienceQA benchmark (CVs: CoE-based Vision-Language Models; SV: Similarity Verification). Performance metrics include: NAT (natural science), SOC (social science), LAN (language), TXT (text context), IMG (image context), NO (no context), G1-6 (grades 1-6), G7-12 (grades 7-12)." + }, + { + "type": "table", + "bbox": [ + 0.095, + 0.264, + 0.486, + 0.357 + ], + "angle": 0, + "content": "
Type | Method | Task 1 (%) | Task 2 (%) | Task 2-Merged (%)
Image-Only | Qwen2.5-7B | 65.04 | 44.52 | 45.33
+ CVs | 68.11 (↑3.07) | 47.00 (↑2.48) | 46.95 (↑1.62)
+ SV | 69.52 (↑1.41) | 49.54 (↑2.54) | 49.07 (↑2.12)
Text-Image | Qwen2.5-7B | 65.04 | 44.52 | 45.33
+ CVs | 68.43 (↑3.39) | 48.61 (↑4.09) | 48.97 (↑3.64)
+ SV | 68.90 (↑0.47) | 50.02 (↑1.41) | 50.69 (↑1.72)
" + }, + { + "type": "table_caption", + "bbox": [ + 0.112, + 0.375, + 0.461, + 0.39 + ], + "angle": 0, + "content": "Table 5. Ablation study on CrisisMMD with Qwen2.5-7B." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.405, + 0.245, + 0.422 + ], + "angle": 0, + "content": "4.3. Ablation Study" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.428, + 0.485, + 0.777 + ], + "angle": 0, + "content": "Our ablation studies on CrisisMMD and ScienceQA demonstrate the specific roles of VaLiK's components. As shown in Table 4 and Table 5, the CVs (CoE-based VLM) module improves accuracy across all settings, with average gains of \\(+3.05\\%\\) on CrisisMMD and \\(+4.63\\%\\) on ScienceQA tasks, validating that visual descriptions enhance reasoning. However, the SV (Similarity Verification) module exhibits dual effects: it significantly improves CrisisMMD metrics by pruning redundant textual descriptions, yet slightly degrades ScienceQA's image-only natural science reasoning. We hypothesize that this discrepancy arises from dataset characteristics: CrisisMMD's generated captions contain substantially more redundant content, whereas ScienceQA's simpler visual scenes yield shorter descriptions. Pruning these shorter descriptions risks over-removal of critical semantics. Furthermore, different types of KGs influence the effectiveness of the components: CVs achieve greater gains in CrisisMMD's text-image fusion as original text provides complementary context, while SV shows reduced effectiveness, likely due to occasional over-pruning of cross-modal linkages. Nevertheless, both modules collectively enhance performance across configurations, demonstrating their synergistic yet context-sensitive nature." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.788, + 0.259, + 0.804 + ], + "angle": 0, + "content": "4.4. Further Analysis" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.81, + 0.485, + 0.903 + ], + "angle": 0, + "content": "Impact of VLM Quantity and Types. We evaluate the impact of varying quantities and types of VLMs on the CVs module. Our experiments reveal that Qwen2-VL generates the most visual descriptions, followed by LLaVA, while BLIP-2 produces the fewest. However, BLIP-2 demonstrates superior capability in extracting critical information" + }, + { + "type": "image", + "bbox": [ + 0.517, + 0.264, + 0.712, + 0.375 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.712, + 0.264, + 0.905, + 0.375 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.532, + 0.387, + 0.887, + 0.403 + ], + "angle": 0, + "content": "Figure 4. Impact analysis of VLM quantity on CrisisMMD." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.413, + 0.907, + 0.534 + ], + "angle": 0, + "content": "and identifying key entity relationships within images. We therefore adopt BLIP-2 as the primary model, with LLaVA or Qwen2-VL serving as secondary/tertiary components. Adding more VLMs yields diminishing returns due to the limited number of entities in the current images, though we hypothesize their benefits would increase for complex visual scenes with richer semantic content. This phenomenon is empirically validated by our quantitative results in Figure 4." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.537, + 0.907, + 0.598 + ], + "angle": 0, + "content": "Computational Costs. Due to space limitations, we provide an overview of VaLiK's computational costs in Appendix F. Our method is significantly more cost-effective than manual annotation or LLM fine-tuning."
+ }, + { + "type": "title", + "bbox": [ + 0.513, + 0.617, + 0.634, + 0.633 + ], + "angle": 0, + "content": "5. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.644, + 0.909, + 0.903 + ], + "angle": 0, + "content": "Multimodal reasoning in LLMs is constrained by incomplete knowledge and hallucination artifacts, limitations that persist because textual KGs cannot bridge visual-textual semantics due to their modality isolation. To bridge this gap, we propose VaLiK, a framework for constructing MMKGs through vision-language alignment, eliminating dependency on manual annotations while resolving visual-textual semantic inconsistencies. By integrating a cascade of pretrained VLMs and cross-modal verification, VaLiK converts images into structured knowledge while filtering noise. The resulting graphs enhance LLMs' reasoning with minimal storage overhead. Experiments on multimodal reasoning benchmarks show SOTA performance. VaLiK's modular design supports adaptability across domains, offering a scalable solution for autonomous knowledge synthesis. This work advances multimodal AI systems by enabling efficient integration of visual and textual data." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.505, + 0.936 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.092, + 0.091, + 0.27, + 0.108 + ], + "angle": 0, + "content": "6. Acknowledgments" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.116, + 0.484, + 0.192 + ], + "angle": 0, + "content": "The research was supported by Shanghai Artificial Intelligence Laboratory, the National Key R&D Program of China (Grant No. 2022ZD0160201) and the Science and Technology Commission of Shanghai Municipality (Grant No. 22DZ1100102)." + }, + { + "type": "title", + "bbox": [ + 0.093, + 0.208, + 0.188, + 0.224 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.101, + 0.233, + 0.484, + 0.302 + ], + "angle": 0, + "content": "[1] Mahdi Abavisani, Liwei Wu, Shengli Hu, Joel Tetreault, and Alejandro Jaimes. Multimodal categorization of crisis events in social media. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.1, + 0.304, + 0.484, + 0.373 + ], + "angle": 0, + "content": "[2] Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. Gpt-4 technical report. arXiv preprint arXiv:2303.08774, 2023. 1, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.101, + 0.376, + 0.484, + 0.432 + ], + "angle": 0, + "content": "[3] Firoj Alam, Ferda Ofli, and Muhammad Imran. Crisismmd: Multimodal twitter datasets from natural disasters. Proceedings of the International AAAI Conference on Web and Social Media, 12(1), 2018. 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.101, + 0.434, + 0.485, + 0.585 + ], + "angle": 0, + "content": "[4] Jean-Baptiste Alayrac, Jeff Donahue, Pauline Luc, Antoine Miech, Iain Barr, Yana Hasson, Karel Lenc, Arthur Mensch, Katherine Millican, Malcolm Reynolds, Roman Ring, Eliza Rutherford, Serkan Cabi, Tengda Han, Zhitao Gong, Sina Samangooei, Marianne Monteiro, Jacob L Menick, Sebastian Borgeaud, Andy Brock, Aida Nematzadeh, Sahand Sharifzadeh, Mikol aj Binkowski, Ricardo Barreira, Oriol Vinyals, Andrew Zisserman, and Karén Simonyan. Flamingo: a visual language model for few-shot learning. 
In Advances in Neural Information Processing Systems, pages 23716-23736. Curran Associates, Inc., 2022. 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.588, + 0.484, + 0.629 + ], + "angle": 0, + "content": "[5] Razvan Azamfirei, Sapna R Kudchadkar, and James Fackler. Large language models and the perils of their hallucinations. Critical Care, 27(1):120, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.632, + 0.484, + 0.7 + ], + "angle": 0, + "content": "[6] Jinheon Baek, Alham Fikri Aji, and Amir Saffari. Knowledge-augmented language model prompting for zero-shot knowledge graph question answering. In Proceedings of the Annual Meeting of the Association for Computational Linguistics (ACL), 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.703, + 0.484, + 0.758 + ], + "angle": 0, + "content": "[7] Tadas Baltrusaitis, Chaitanya Ahuja, and Louis-Philippe Morency. Multimodal machine learning: A survey and taxonomy. IEEE Transactions on Pattern Analysis and Machine Intelligence, 41(2):423-443, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.761, + 0.484, + 0.844 + ], + "angle": 0, + "content": "[8] Dawei Chen, Zhixu Li, Binbin Gu, and Zhigang Chen. Multimodal named entity recognition with image attributes and image knowledge. In Database Systems for Advanced Applications: 26th International Conference, DASFAA 2021, Taipei, Taiwan, April 11–14, 2021, Proceedings, Part II 26, pages 186–201. Springer, 2021. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.846, + 0.484, + 0.901 + ], + "angle": 0, + "content": "[9] Jiawei Chen, Hongyu Lin, Xianpei Han, and Le Sun. Benchmarking large language models in retrieval-augmented generation. Proceedings of the AAAI Conference on Artificial Intelligence, 38(16):17754-17762, 2024. 1" + }, + { + "type": "list", + "bbox": [ + 0.1, + 0.233, + 0.485, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.093, + 0.906, + 0.16 + ], + "angle": 0, + "content": "[10] Ting Chen, Simon Kornblith, Kevin Swersky, Mohammad Norouzi, and Geoffrey E Hinton. Big self-supervised models are strong semi-supervised learners. Advances in neural information processing systems (NeurIPS), 33:22243-22255, 2020. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.163, + 0.906, + 0.245 + ], + "angle": 0, + "content": "[11] Xiang Chen, Ningyu Zhang, Lei Li, Shumin Deng, Chuanqi Tan, Changliang Xu, Fei Huang, Luo Si, and Huajun Chen. Hybrid transformer with multi-level fusion for multimodal knowledge graph completion. In Proceedings of the International Conference on Research and Development in Information Retrieva (SIGIR), pages 904-915, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.247, + 0.906, + 0.315 + ], + "angle": 0, + "content": "[12] Xi Chen, Josip Djolonga, Piotr Padlewski, Basil Mustafa, Soravit Changpinyo, Jialin Wu, Carlos Riquelme Ruiz, Sebastian Goodman, Xiao Wang, Yi Tay, et al. Pali-x: On scaling up a multilingual vision and language model. arXiv preprint arXiv:2305.18565, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.317, + 0.906, + 0.385 + ], + "angle": 0, + "content": "[13] Xiang Chen, Jingtian Zhang, Xiaohan Wang, Ningyu Zhang, Tongtong Wu, Yuxiang Wang, Yongheng Wang, and Huajun Chen. Continual multimodal knowledge graph construction. In Proceedings of the Thirty-Third International Joint Conference on Artificial Intelligence, 2024. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.386, + 0.906, + 0.453 + ], + "angle": 0, + "content": "[14] Xiaokang Chen, Zhiyu Wu, Xingchao Liu, Zizheng Pan, Wen Liu, Zhenda Xie, Xingkai Yu, and Chong Ruan. Januspro: Unified multimodal understanding and generation with data and model scaling. arXiv preprint arXiv:2501.17811, 2025. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.455, + 0.906, + 0.509 + ], + "angle": 0, + "content": "[15] Yong Chen, Xinkai Ge, Shengli Yang, Linmei Hu, Jie Li, and Jinwen Zhang. A survey on multimodal knowledge graphs: Construction, completion and applications. Mathematics, 11 (8), 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.511, + 0.906, + 0.579 + ], + "angle": 0, + "content": "[16] Zhuo Chen, Yichi Zhang, Yin Fang, Yuxia Geng, Lingbing Guo, Xiang Chen, Qian Li, Wen Zhang, Jiaoyan Chen, Yushan Zhu, et al. Knowledge graphs meet multimodal learning: A comprehensive survey. arXiv preprint arXiv:2402.05391, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.581, + 0.906, + 0.649 + ], + "angle": 0, + "content": "[17] Shiyao Cui, Jiangxia Cao, Xin Cong, Jiawei Sheng, Quanggang Li, Tingwen Liu, and Jinqiao Shi. Enhancing multimodal entity and relation extraction with variational information bottleneck. IEEE/ACM Transactions on Audio, Speech, and Language Processing, 32:1274-1285, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.65, + 0.906, + 0.704 + ], + "angle": 0, + "content": "[18] Xiaohui Cui, Xiaolong Qu, Dongmei Li, Yu Yang, Yuxun Li, and Xiaoping Zhang. Mkgcn: Multi-modal knowledge graph convolutional network for music recommender systems. *Electronics*, 12(12), 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.706, + 0.906, + 0.774 + ], + "angle": 0, + "content": "[19] Yihe Deng, Chenchen Ye, Zijie Huang, Mingyu Derek Ma, Yiwen Kou, and Wei Wang. Graphvis: Boosting llms with visual knowledge graph integration. In Advances in Neural Information Processing Systems, pages 67511-67534. Curran Associates, Inc., 2024. 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.776, + 0.906, + 0.829 + ], + "angle": 0, + "content": "[20] Tausif Diwan, G. Anirudh, and Jitendra V. Tembhurne. Object detection using yolo: challenges, architectural successors, datasets and applications. Multimedia Tools Appl., 82 (6):9243-9275, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.832, + 0.906, + 0.871 + ], + "angle": 0, + "content": "[21] Alexey Dosovitskiy. An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929, 2020. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.873, + 0.906, + 0.901 + ], + "angle": 0, + "content": "[22] Darren Edge, Ha Trinh, Newman Cheng, Joshua Bradley, Alex Chao, Apurva Mody, Steven Truitt, Dasha Metropoli" + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.093, + 0.906, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.505, + 0.937 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.125, + 0.092, + 0.482, + 0.134 + ], + "angle": 0, + "content": "tansky, Robert Osazuwa Ness, and Jonathan Larson. From local to global: A graph rag approach to query-focused summarization. arXiv preprint arXiv:2404.16130, 2024. 
5" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.135, + 0.483, + 0.19 + ], + "angle": 0, + "content": "[23] Quan Fang, Xiaowei Zhang, Jun Hu, Xian Wu, and Changsheng Xu. Contrastive multi-modal knowledge graph representation learning. IEEE Transactions on Knowledge and Data Engineering, 35(9):8983-8996, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.19, + 0.482, + 0.246 + ], + "angle": 0, + "content": "[24] Duoduo Feng, Xiangteng He, and Yuxin Peng. Mkvse: Multimodal knowledge enhanced visual-semantic embedding for image-text retrieval. ACM Trans. Multimedia Comput. Commun. Appl., 19(5), 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.246, + 0.482, + 0.315 + ], + "angle": 0, + "content": "[25] Deepanway Ghosal, Navonil Majumder, Roy Ka-Wei Lee, Rada Mihalcea, and Soujanya Poria. Language guided visual question answering: Elevate your multimodal language model using knowledge-enriched prompts. arXiv preprint arXiv:2310.20159, 2023. 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.316, + 0.483, + 0.384 + ], + "angle": 0, + "content": "[26] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025. 1, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.385, + 0.483, + 0.455 + ], + "angle": 0, + "content": "[27] Yunfei Guo, Fei Yin, Xiao-hui Li, Xudong Yan, Tao Xue, Shuqi Mei, and Cheng-Lin Liu. Visual traffic knowledge graph generation from scene images. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 21604-21613, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.455, + 0.482, + 0.496 + ], + "angle": 0, + "content": "[28] ZIRUI GUO, Lianghao Xia, Yanhua Yu, Tu Ao, and Chao Huang. LightRAG: Simple and fast retrieval-augmented generation, 2024. 2, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.497, + 0.482, + 0.553 + ], + "angle": 0, + "content": "[29] Lavdim Halilaj, Juergen Luettin, Sebastian Monka, Cory Henson, and Stefan Schmid. Knowledge graph-based integration of autonomous driving datasets. International Journal of Semantic Computing, 17(02):249-271, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.553, + 0.482, + 0.609 + ], + "angle": 0, + "content": "[30] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2016. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.609, + 0.482, + 0.678 + ], + "angle": 0, + "content": "[31] Yang Hu, Guihua Wen, Adriane Chapman, Pei Yang, Mingnan Luo, Yingxue Xu, Dan Dai, and Wendy Hall. Graph-based visual-semantic entanglement network for zero-shot image recognition. IEEE Transactions on Multimedia, 24: 2473-2487, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.679, + 0.482, + 0.775 + ], + "angle": 0, + "content": "[32] Zhiqiang Hu, Lei Wang, Yihuai Lan, Wanyu Xu, Ee-Peng Lim, Lidong Bing, Xing Xu, Soujanya Poria, and Roy Lee. LLM-adapters: An adapter family for parameter-efficient fine-tuning of large language models. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 5254-5276, Singapore, 2023. Association for Computational Linguistics. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.776, + 0.482, + 0.831 + ], + "angle": 0, + "content": "[33] Aaron Hurst, Adam Lerer, Adam P Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihinda, Alan Hayes, Alec Radford, et al. Gpt-4o system card. arXiv preprint arXiv:2410.21276, 2024. 1, 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.831, + 0.482, + 0.901 + ], + "angle": 0, + "content": "[34] Justin Johnson, Ranjay Krishna, Michael Stark, Li-Jia Li, David Shamma, Michael Bernstein, and Li Fei-Fei. Image retrieval using scene graphs. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2015. 2" + }, + { + "type": "list", + "bbox": [ + 0.093, + 0.092, + 0.483, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.092, + 0.905, + 0.162 + ], + "angle": 0, + "content": "[35] Adam Tauman Kalai and Santosh S. Vempala. Calibrated language models must hallucinate. In Proceedings of the 56th Annual ACM Symposium on Theory of Computing, page 160–171, New York, NY, USA, 2024. Association for Computing Machinery. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.162, + 0.905, + 0.274 + ], + "angle": 0, + "content": "[36] Amar Viswanathan Kannan, Dmitriy Fradkin, Ioannis Akrotirianakis, Tugba Kulahcioglu, Arquimedes Canedo, Aditi Roy, Shih-Yuan Yu, Malawade Arnav, and Mohammad Abdullah Al Faruque. Multimodal knowledge graph for deep learning papers and code. In Proceedings of the 29th ACM International Conference on Information & Knowledge Management, page 3417-3420, New York, NY, USA, 2020. Association for Computing Machinery. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.274, + 0.905, + 0.342 + ], + "angle": 0, + "content": "[37] Jared Kaplan, Sam McCandlish, Tom Henighan, Tom B Brown, Benjamin Chess, Rewon Child, Scott Gray, Alec Radford, Jeffrey Wu, and Dario Amodei. Scaling laws for neural language models. arXiv preprint arXiv:2001.08361, 2020. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.342, + 0.905, + 0.426 + ], + "angle": 0, + "content": "[38] Ranjay Krishna, Yuke Zhu, Oliver Groth, Justin Johnson, Kenji Hata, Joshua Kravitz, Stephanie Chen, Yannis Kalantidis, Li-Jia Li, David A Shamma, et al. Visual genome: Connecting language and vision using crowdsourced dense image annotations. International journal of computer vision, 123:32-73, 2017. 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.426, + 0.905, + 0.509 + ], + "angle": 0, + "content": "[39] Junlin Lee, Yequan Wang, Jing Li, and Min Zhang. Multimodal reasoning with multimodal knowledge graph. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 10767-10782, Bangkok, Thailand, 2024. Association for Computational Linguistics. 2, 3, 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.51, + 0.905, + 0.58 + ], + "angle": 0, + "content": "[40] Junnan Li, Dongxu Li, Caiming Xiong, and Steven Hoi. BLIP: Bootstrapping language-image pre-training for unified vision-language understanding and generation. In Proceedings of the 39th International Conference on Machine Learning, pages 12888-12900. PMLR, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.58, + 0.905, + 0.65 + ], + "angle": 0, + "content": "[41] Junnan Li, Dongxu Li, Silvio Savarese, and Steven C. H. Hoi. BLIP-2: bootstrapping language-image pre-training with frozen image encoders and large language models. 
In Proceedings of the International Conference on Machine Learning (ICML), pages 19730–19742, 2023. 1, 3, 4, 6, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.65, + 0.905, + 0.718 + ], + "angle": 0, + "content": "[42] Xin Li, Dongze Lian, Zhihe Lu, Jiawang Bai, Zhibo Chen, and Xinchao Wang. Graphadapter: Tuning vision-language models with dual knowledge graph. In Advances in Neural Information Processing Systems, pages 13448-13466. Curran Associates, Inc., 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.719, + 0.905, + 0.775 + ], + "angle": 0, + "content": "[43] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. In Advances in Neural Information Processing Systems, pages 34892-34916. Curran Associates, Inc., 2023. 3, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.776, + 0.905, + 0.845 + ], + "angle": 0, + "content": "[44] Junming Liu, Yanting Gao, Siyuan Meng, Yifei Sun, Aoqi Wu, Yufei Jin, Yirong Chen, Ding Wang, and Guosun Zeng. Mosaic: Data-free knowledge distillation via mixture-of-experts for heterogeneous distributed environments. arXiv preprint arXiv:2505.19699, 2025. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.845, + 0.905, + 0.901 + ], + "angle": 0, + "content": "[45] Junming Liu, Guosun Zeng, Ding Wang, Yanting Gao, and Yufei Jin. Fedrecon: Missing modality reconstruction in distributed heterogeneous environments. arXiv preprint arXiv:2504.09941, 2025.3" + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.092, + 0.905, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.926, + 0.509, + 0.937 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.092, + 0.482, + 0.174 + ], + "angle": 0, + "content": "[46] Ye Liu, Hui Li, Alberto Garcia-Duran, Mathias Niepert, Daniel Onoro-Rubio, and David S Rosenblum. Mmkg: multi-modal knowledge graphs. In The Semantic Web: 16th International Conference, ESWC 2019, Portoroz, Slovenia, June 2–6, 2019, Proceedings 16, pages 459–474. Springer, 2019. 2, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.176, + 0.482, + 0.245 + ], + "angle": 0, + "content": "[47] Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin transformer: Hierarchical vision transformer using shifted windows. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 10012-10022, 2021. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.247, + 0.482, + 0.328 + ], + "angle": 0, + "content": "[48] Pan Lu, Swaroop Mishra, Tanglin Xia, Liang Qiu, Kai-Wei Chang, Song-Chun Zhu, Oyvind Tafjord, Peter Clark, and Ashwin Kalyan. Learn to explain: Multimodal reasoning via thought chains for science question answering. In Advances in Neural Information Processing Systems, pages 2507–2521. Curran Associates, Inc., 2022. 2, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.33, + 0.482, + 0.411 + ], + "angle": 0, + "content": "[49] Pan Lu, Baolin Peng, Hao Cheng, Michel Galley, Kai-Wei Chang, Ying Nian Wu, Song-Chun Zhu, and Jianfeng Gao. Chameleon: Plug-and-play compositional reasoning with large language models. In Advances in Neural Information Processing Systems, pages 43447-43478. Curran Associates, Inc., 2023. 
6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.413, + 0.482, + 0.481 + ], + "angle": 0, + "content": "[50] Gen Luo, Yiyi Zhou, Tianhe Ren, Shengxin Chen, Xiaoshuai Sun, and Rongrong Ji. Cheap and quick: Efficient vision-language instruction tuning for large language models. In Advances in Neural Information Processing Systems, pages 29615-29627. Curran Associates, Inc., 2023. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.483, + 0.482, + 0.55 + ], + "angle": 0, + "content": "[51] Kenneth Marino, Mohammad Rastegari, Ali Farhadi, and Roozbeh Mottaghi. Ok-vqa: A visual question answering benchmark requiring external knowledge. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.552, + 0.482, + 0.607 + ], + "angle": 0, + "content": "[52] Runqing Miao, Qingxuan Jia, Fuchun Sun, Gang Chen, Haiming Huang, and Shengyi Miao. Semantic representation of robot manipulation with knowledge graph. Entropy, 25(4), 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.608, + 0.482, + 0.678 + ], + "angle": 0, + "content": "[53] Chancharik Mitra, Brandon Huang, Trevor Darrell, and Roei Herzig. Compositional chain-of-thought prompting for large multimodal models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 14420-14431, 2024. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.679, + 0.482, + 0.731 + ], + "angle": 0, + "content": "[54] Aske Plaat, Annie Wong, Suzan Verberne, Joost Broekens, Niki van Stein, and Thomas Back. Reasoning with large language models, a survey. arXiv preprint arXiv:2407.11511, 2024. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.733, + 0.482, + 0.815 + ], + "angle": 0, + "content": "[55] Bryan A. Plummer, Liwei Wang, Chris M. Cervantes, Juan C. Caicedo, Julia Hockenmaier, and Svetlana Lazebnik. Flickr30k entities: Collecting region-to-phrase correspondences for richer image-to-sentence models. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2015. 2, 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.818, + 0.482, + 0.872 + ], + "angle": 0, + "content": "[56] Shengsheng Qian, Jun Hu, Quan Fang, and Changsheng Xu. Knowledge-aware multi-modal adaptive graph convolutional networks for fake news detection. ACM Trans. Multimedia Comput. Commun. Appl., 17(3), 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.873, + 0.482, + 0.901 + ], + "angle": 0, + "content": "[57] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry," + }, + { + "type": "list", + "bbox": [ + 0.093, + 0.092, + 0.482, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.545, + 0.092, + 0.905, + 0.161 + ], + "angle": 0, + "content": "Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision. In Proceedings of the 38th International Conference on Machine Learning, pages 8748-8763. PMLR, 2021. 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.163, + 0.905, + 0.231 + ], + "angle": 0, + "content": "[58] Brian Reily, Christopher Reardon, and Hao Zhang. Representing multi-robot structure through multimodal graph embedding for the selection of robot teams. In 2020 IEEE International Conference on Robotics and Automation (ICRA), pages 5576–5582, 2020. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.233, + 0.905, + 0.3 + ], + "angle": 0, + "content": "[59] Joshua Robinson, Christopher Michael Ryting, and David Wingate. Leveraging large language models for multiple choice question answering. In Proceedings of the International Conference on Learning Representations (ICLR), 2023. 1, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.302, + 0.905, + 0.399 + ], + "angle": 0, + "content": "[60] Hrituraj Singh, Anshul Nasery, Denil Mehta, Aishwarya Agarwal, Jatin Lamba, and Balaji Vasan Srinivasan. MI-MOQA: Multimodal input multimodal output question answering. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 5317-5332, Online, 2021. Association for Computational Linguistics. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.4, + 0.905, + 0.467 + ], + "angle": 0, + "content": "[61] Yaoxian Song, Penglei Sun, Haoyu Liu, Zhixu Li, Wei Song, Yanghua Xiao, and Xiaofang Zhou. Scene-driven multimodal knowledge graph construction for embodied ai. IEEE Transactions on Knowledge and Data Engineering, 36(11): 6962-6976, 2024. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.469, + 0.905, + 0.566 + ], + "angle": 0, + "content": "[62] Rui Sun, Xuezhi Cao, Yan Zhao, Junchen Wan, Kun Zhou, Fuzheng Zhang, Zhongyuan Wang, and Kai Zheng. Multimodal knowledge graphs for recommender systems. In Proceedings of the 29th ACM International Conference on Information & Knowledge Management, page 1405-1414, New York, NY, USA, 2020. Association for Computing Machinery. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.567, + 0.905, + 0.635 + ], + "angle": 0, + "content": "[63] Yu Sun, Shuohuan Wang, Shikun Feng, Siyu Ding, Chao Pang, Junyuan Shang, Jiaxiang Liu, Xuyi Chen, Yanbin Zhao, Yuxiang Lu, et al. Ernie 3.0: Large-scale knowledge enhanced pre-training for language understanding and generation. arXiv preprint arXiv:2107.02137, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.637, + 0.905, + 0.704 + ], + "angle": 0, + "content": "[64] Gemini Team, Rohan Anil, Sebastian Borgeaud, Jean-Baptiste Alayrac, Jiahui Yu, Radu Soricut, Johan Schalkwyk, Andrew M Dai, Anja Hauth, Katie Millican, et al. Gemini: a family of highly capable multimodal models. arXiv preprint arXiv:2312.11805, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.706, + 0.905, + 0.802 + ], + "angle": 0, + "content": "[65] Shengbang Tong, Ellis L Brown II, Penghao Wu, Sanghyun Woo, ADITHYA JAIRAM IYER, Sai Charitha Akula, Shusheng Yang, Jihan Yang, Manoj Middepogu, Ziteng Wang, Xichen Pan, Rob Fergus, Yann LeCun, and Saining Xie. Cambrian-1: A fully open, vision-centric exploration of multimodal LLMs. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.804, + 0.905, + 0.872 + ], + "angle": 0, + "content": "[66] Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, et al. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288, 2023. 
1, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.873, + 0.905, + 0.901 + ], + "angle": 0, + "content": "[67] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia" + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.092, + 0.905, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.508, + 0.937 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.125, + 0.092, + 0.482, + 0.133 + ], + "angle": 0, + "content": "Polosukhin. Attention is all you need. In Advances in Neural Information Processing Systems. Curran Associates, Inc., 2017. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.136, + 0.483, + 0.191 + ], + "angle": 0, + "content": "[68] Peng Wang, Qi Wu, Chunhua Shen, Anthony Dick, and Anton van den Hengel. Fvqa: Fact-based visual question answering. IEEE Transactions on Pattern Analysis and Machine Intelligence, 40(10):2413-2427, 2018. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.193, + 0.482, + 0.262 + ], + "angle": 0, + "content": "[69] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, et al. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191, 2024. 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.264, + 0.482, + 0.347 + ], + "angle": 0, + "content": "[70] Xin Wang, Benyuan Meng, Hong Chen, Yuan Meng, Ke Lv, and Wenwu Zhu. Tiva-kg: A multimodal knowledge graph with text, image, video and audio. In Proceedings of the 31st ACM International Conference on Multimedia, page 2391-2399, New York, NY, USA, 2023. Association for Computing Machinery. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.349, + 0.482, + 0.404 + ], + "angle": 0, + "content": "[71] Yuequn Wang, Liyan Dong, Hao Zhang, Xintao Ma, Yongli Li, and Minghui Sun. An enhanced multi-modal recommendation based on alternate training with knowledge graph representation. IEEE Access, 8:213012-213026, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.406, + 0.482, + 0.489 + ], + "angle": 0, + "content": "[72] Tao Wu, Mengze Li, Jingyuan Chen, Wei Ji, Wang Lin, Jinyang Gao, Kun Kuang, Zhou Zhao, and Fei Wu. Semantic alignment for multimodal large language models. In Proceedings of the 32nd ACM International Conference on Multimedia, page 3489-3498, New York, NY, USA, 2024. Association for Computing Machinery. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.491, + 0.482, + 0.546 + ], + "angle": 0, + "content": "[73] Yike Wu, Nan Hu, Guilin Qi, Sheng Bi, Jie Ren, Anhuan Xie, and Wei Song. Retrieve-rewrite-answer: A kg-to-text enhanced llms framework for knowledge graph question answering. arXiv preprint arXiv:2309.11206, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.548, + 0.482, + 0.63 + ], + "angle": 0, + "content": "[74] Ziyang Xiao, Dongxiang Zhang, Yangjun Wu, Lilin Xu, Yuan Jessica Wang, Xiongwei Han, Xiaojin Fu, Tao Zhong, Jia Zeng, Mingli Song, and Gang Chen. Chain-of-experts: When LLMs meet complex operations research problems. In The Twelfth International Conference on Learning Representations, 2024. 
2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.633, + 0.483, + 0.744 + ], + "angle": 0, + "content": "[75] Dexuan Xu, Yanyuan Chen, Jieyi Wang, Yue Huang, Hanpin Wang, Zhi Jin, Hongxing Wang, Weihua Yue, Jing He, Hang Li, and Yu Huang. MLeVLM: Improve multi-level progressive capabilities based on multimodal large language model for medical visual question answering. In Findings of the Association for Computational Linguistics: ACL 2024, pages 4977-4997, Bangkok, Thailand, 2024. Association for Computational Linguistics. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.746, + 0.483, + 0.8 + ], + "angle": 0, + "content": "[76] Peng Xu, Xiatian Zhu, and David A. Clifton. Multimodal learning with transformers: A survey. IEEE Transactions on Pattern Analysis and Machine Intelligence, 45(10):12113-12132, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.802, + 0.483, + 0.857 + ], + "angle": 0, + "content": "[77] An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, et al. Qwen2. 5 technical report. arXiv preprint arXiv:2412.15115, 2024. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.859, + 0.483, + 0.902 + ], + "angle": 0, + "content": "[78] Xiaocui Yang, Wenfang Wu, Shi Feng, Ming Wang, Daling Wang, Yang Li, Qi Sun, Yifei Zhang, Xiaoming Fu, and Soujanya Poria. Mm-bigbench: Evaluating multimodal models" + }, + { + "type": "list", + "bbox": [ + 0.093, + 0.092, + 0.483, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.545, + 0.093, + 0.905, + 0.12 + ], + "angle": 0, + "content": "on multimodal content comprehension tasks. arXiv preprint arXiv:2310.09036, 2023. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.122, + 0.905, + 0.19 + ], + "angle": 0, + "content": "[79] Qinghao Ye, Haiyang Xu, Guohai Xu, Jiabo Ye, Ming Yan, Yiyang Zhou, Junyang Wang, Anwen Hu, Pengcheng Shi, Yaya Shi, et al. mplug-owl: Modularization empowers large language models with multimodality. arXiv preprint arXiv:2304.14178, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.193, + 0.905, + 0.235 + ], + "angle": 0, + "content": "[80] Shukang Yin, Chaoyou Fu, Sirui Zhao, Ke Li, Xing Sun, Tong Xu, and Enhong Chen. A survey on multimodal large language models. arXiv preprint arXiv:2306.13549, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.236, + 0.905, + 0.305 + ], + "angle": 0, + "content": "[81] Jingtong Yue, Zhiwei Lin, Xin Lin, Xiaoyu Zhou, Xiangtai Li, Lu Qi, Yongtao Wang, and Ming-Hsuan Yang. RobuR-CDet: Enhancing robustness of radar-camera fusion in bird's eye view for 3d object detection. In The Thirteenth International Conference on Learning Representations, 2025. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.307, + 0.905, + 0.374 + ], + "angle": 0, + "content": "[82] Yichi Zhang, Zhuo Chen, Lingbing Guo, Yajing Xu, Binbin Hu, Ziqi Liu, Huajun Chen, and Wen Zhang. Mygo: Discrete modality information as fine-grained tokens for multi-modal knowledge graph completion. CoRR, abs/2404.09468, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.377, + 0.905, + 0.433 + ], + "angle": 0, + "content": "[83] Zhuosheng Zhang, Aston Zhang, Mu Li, hai zhao, George Karypis, and Alex Smola. Multimodal chain-of-thought reasoning in language models. Transactions on Machine Learning Research, 2024. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.435, + 0.905, + 0.49 + ], + "angle": 0, + "content": "[84] Jiabao Zhao, Xin Lin, Jie Zhou, Jing Yang, Liang He, and Zhaohui Yang. Knowledge-based fine-grained classification for few-shot learning. In 2020 IEEE International Conference on Multimedia and Expo (ICME), pages 1-6, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.492, + 0.905, + 0.575 + ], + "angle": 0, + "content": "[85] Changmeng Zheng, Junhao Feng, Ze Fu, Yi Cai, Qing Li, and Tao Wang. Multimodal relation extraction with efficient graph alignment. In Proceedings of the 29th ACM International Conference on Multimedia, page 5298-5306, New York, NY, USA, 2021. Association for Computing Machinery. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.577, + 0.905, + 0.645 + ], + "angle": 0, + "content": "[86] Ge Zheng, Bin Yang, Jiajin Tang, Hong-Yu Zhou, and Sibei Yang. Ddcot: Duty-distinct chain-of-thought prompting for multimodal reasoning in language models. In Advances in Neural Information Processing Systems, pages 5168-5191. Curran Associates, Inc., 2023. 3, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.647, + 0.905, + 0.716 + ], + "angle": 0, + "content": "[87] Juncheng Zheng, Meiyu Liang, Yang Yu, Yawen Li, and Zhe Xue. Knowledge graph enhanced multimodal transformer for image-text retrieval. In 2024 IEEE 40th International Conference on Data Engineering (ICDE), pages 70-82, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.718, + 0.905, + 0.785 + ], + "angle": 0, + "content": "[88] Deyao Zhu, Jun Chen, Kilichbek Haydarov, Xiaogian Shen, Wenxuan Zhang, and Mohamed Elhoseiny. Chatgpt asks, blip-2 answers: Automatic questioning towards enriched visual descriptions. arXiv preprint arXiv:2303.06594, 2023. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.788, + 0.905, + 0.857 + ], + "angle": 0, + "content": "[89] Deyao Zhu, Jun Chen, Xiaogian Shen, Xiang Li, and Mohamed Elhoseiny. MiniGPT-4: Enhancing vision-language understanding with advanced large language models. In The Twelfth International Conference on Learning Representations, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.859, + 0.905, + 0.902 + ], + "angle": 0, + "content": "[90] Xiangru Zhu, Zhixu Li, Xiaodan Wang, Xueyao Jiang, Penglei Sun, Xuwu Wang, Yanghua Xiao, and Nicholas Jing Yuan. Multi-modal knowledge graph construction and ap" + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.093, + 0.905, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.092, + 0.484, + 0.12 + ], + "angle": 0, + "content": "plication: A survey. IEEE Transactions on Knowledge and Data Engineering, 36(2):715-735, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.121, + 0.484, + 0.163 + ], + "angle": 0, + "content": "[91] Zhengxia Zou, Keyan Chen, Zhenwei Shi, Yuhong Guo, and Jieping Ye. Object detection in 20 years: A survey. Proceedings of the IEEE, 111(3):257-276, 2023. 
3" + }, + { + "type": "list", + "bbox": [ + 0.093, + 0.092, + 0.484, + 0.163 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.107, + 0.086, + 0.892, + 0.132 + ], + "angle": 0, + "content": "Aligning Vision to Language: Annotation-Free Multimodal Knowledge Graph Construction for Enhanced LLMs Reasoning" + }, + { + "type": "text", + "bbox": [ + 0.382, + 0.142, + 0.615, + 0.163 + ], + "angle": 0, + "content": "Supplementary Material" + }, + { + "type": "image", + "bbox": [ + 0.095, + 0.179, + 0.48, + 0.407 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.419, + 0.484, + 0.475 + ], + "angle": 0, + "content": "Figure 5. (a) The limited information contained in text-based KGs leads to inaccurate responses. (b) Leveraging MMKGs enables reasoning with enriched multimodal information to produce the correct answer." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.492, + 0.484, + 0.526 + ], + "angle": 0, + "content": "A. Cross-Modal Reasoning Failures in Textual KGs" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.539, + 0.484, + 0.901 + ], + "angle": 0, + "content": "Multimodal learning, by virtue of its capability to synergistically integrate heterogeneous data modalities, establishes a comprehensive knowledge acquisition paradigm that significantly enhances reasoning robustness [39]. This principle extends to Multimodal Knowledge Graphs (MMKGs), where the semantic symbiosis between visual and textual modalities addresses the critical limitation of modal isolation inherent in conventional text-based KGs. As empirically demonstrated in Figure 5, pure textual KGs often induce hallucinated or incomplete responses due to their inability to resolve visual-textual semantic ambiguities. For instance, when queried about fine-grained visual attributes (e.g., spatial relationships or object properties absent in textual metadata), LLMs grounded solely on textual KG triples frequently generate plausible but factually inconsistent answers, as they lack access to cross-modal referential grounding. In contrast, MMKGs bridge this gap through bidirectional visual-textual entity linking, enabling LLMs to retrieve and reason over fused evidence from both modalities. Our qualitative analysis of the case in Figure 5 reveals that the multimodal reasoning path—leveraging both image-derived entities and textual relationships—is essential for deriving logically coherent and factually accurate conclusions." + }, + { + "type": "image", + "bbox": [ + 0.516, + 0.179, + 0.905, + 0.328 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.513, + 0.34, + 0.907, + 0.367 + ], + "angle": 0, + "content": "Figure 6. Three example social media posts with labelled named entities [8]." + }, + { + "type": "table", + "bbox": [ + 0.518, + 0.37, + 0.915, + 0.536 + ], + "angle": 0, + "content": "
<table><tr><th>Type</th><th>#Chains</th><th>Mentions/Chain</th><th>Boxes/Chain</th></tr>
<tr><td>people</td><td>59766</td><td>3.17</td><td>1.95</td></tr>
<tr><td>clothing</td><td>42380</td><td>1.76</td><td>1.44</td></tr>
<tr><td>body parts</td><td>12809</td><td>1.50</td><td>1.42</td></tr>
<tr><td>animals</td><td>5086</td><td>3.63</td><td>1.44</td></tr>
<tr><td>vehicles</td><td>5561</td><td>2.77</td><td>1.21</td></tr>
<tr><td>instruments</td><td>1827</td><td>2.85</td><td>1.61</td></tr>
<tr><td>scene</td><td>46919</td><td>2.03</td><td>0.62</td></tr>
<tr><td>other</td><td>82098</td><td>1.94</td><td>1.04</td></tr>
<tr><td>total</td><td>244035</td><td>2.10</td><td>1.13</td></tr></table>
" + }, + { + "type": "table_caption", + "bbox": [ + 0.512, + 0.546, + 0.907, + 0.601 + ], + "angle": 0, + "content": "Table 6. Coreference chain statistics of Flickr30K-Entity. The number of mentions per chain indicates how salient an entity is. The number of boxes per chain indicates how many distinct entities it refers to." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.63, + 0.905, + 0.663 + ], + "angle": 0, + "content": "B. Case Studies on Manual Annotation Overheads" + }, + { + "type": "text", + "bbox": [ + 0.511, + 0.675, + 0.907, + 0.901 + ], + "angle": 0, + "content": "The development of robust entity extraction models typically hinges on large-scale annotated corpora, yet the generalizability of these models remains intrinsically bounded by the semantic scope and granularity of their training datasets. Widely-adopted benchmarks such as Flickr30K-Entity [55] exemplify this constraint: while serving as de facto standards for evaluating visual-linguistic entity grounding, their construction necessitates labor-intensive manual annotations at scale. As illustrated in Figure 6, even high-quality annotations in such datasets often adopt a minimalist tagging paradigm—identifying only coarse-grained entities while neglecting fine-grained attributes and contextual relationships. This sparsity of semantic enrichment directly propagates to trained models, which consequently fail to capture the compositional semantics necessary for com" + }, + { + "type": "page_number", + "bbox": [ + 0.495, + 0.925, + 0.504, + 0.936 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.094, + 0.089, + 0.482, + 0.26 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.271, + 0.483, + 0.315 + ], + "angle": 0, + "content": "Figure 7. An example from the ScienceQA benchmark [48], illustrating multimodal question-answering scenarios that necessitate joint reasoning over textual prompts and visual evidence." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.325, + 0.262, + 0.34 + ], + "angle": 0, + "content": "plex reasoning scenarios." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.352, + 0.483, + 0.387 + ], + "angle": 0, + "content": "C. Case Studies on Visual Specificity Deficits in VLM-Generated Captions" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.395, + 0.485, + 0.668 + ], + "angle": 0, + "content": "As exemplified in Figure 7, vision-language models like BLIP-2 [41] tend to produce oversimplified textual descriptions that critically lack actionable visual-semantic signals. The VLM-generated caption (\"A map of the united states with the location of the united states\") merely identifies coarse-grained scene semantics, failing to capture object-level attributes (color coding of regions), spatial relationships (border adjacency between Arizona and Mexico) and compositional context (compass orientation in lower-right corner). In contrast, human annotations (\"This is a map of the United States. The main part of the country is shown in green, with several states labeled. Arizona is in the southwestern part of the US, bordering Mexico. Oklahoma is in the central - southern region. Louisiana is located along the Gulf of Mexico in the southeastern part. West Virginia is in the eastern part of the country. There's also a compass in the bottom - right corner to show directions.\") demonstrate essential characteristics for multimodal reasoning." 
+ }, + { + "type": "title", + "bbox": [ + 0.091, + 0.679, + 0.481, + 0.697 + ], + "angle": 0, + "content": "D. Retrieval Strategy in MMKG Construction" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.705, + 0.483, + 0.735 + ], + "angle": 0, + "content": "We adopt retrieval strategies based on the framework provided by LightRAG [28], which supports multiple modes:" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.736, + 0.433, + 0.75 + ], + "angle": 0, + "content": "- local: focuses on context-dependent information;" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.751, + 0.334, + 0.765 + ], + "angle": 0, + "content": "- global: utilizes global knowledge;" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.766, + 0.458, + 0.78 + ], + "angle": 0, + "content": "- hybrid: combines local and global retrieval methods;" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.781, + 0.482, + 0.811 + ], + "angle": 0, + "content": "- naive: performs basic search without advanced techniques;" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.811, + 0.461, + 0.825 + ], + "angle": 0, + "content": "- mix: integrates knowledge graph and vector retrieval." + }, + { + "type": "list", + "bbox": [ + 0.091, + 0.736, + 0.482, + 0.825 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.826, + 0.483, + 0.901 + ], + "angle": 0, + "content": "In our implementation, we rely on the hybrid retrieval mode, which balances the precision of local cues with the breadth of global knowledge. This strategy improves the relevance and completeness of retrieved information, which is crucial for high-quality MMKG construction." + }, + { + "type": "title", + "bbox": [ + 0.516, + 0.091, + 0.738, + 0.106 + ], + "angle": 0, + "content": "Algorithm 1 MMKG Generation" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.113, + 0.905, + 0.144 + ], + "angle": 0, + "content": "Require: \\(\\hat{S}\\) (refined description), \\(T\\) (external knowledge, optional)" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.144, + 0.786, + 0.159 + ], + "angle": 0, + "content": "Ensure: \\(\\mathcal{G} = (\\mathcal{E},\\mathcal{R})\\) (knowledge graph)" + }, + { + "type": "text", + "bbox": [ + 0.526, + 0.159, + 0.905, + 0.173 + ], + "angle": 0, + "content": "1: \\(\\mathcal{T}\\gets \\hat{S}\\oplus T\\) \\(\\triangleright\\) Concatenate \\(\\hat{S}\\) and \\(T\\)" + }, + { + "type": "text", + "bbox": [ + 0.526, + 0.174, + 0.905, + 0.189 + ], + "angle": 0, + "content": "2: \\(\\mathcal{G}\\gets \\mathrm{LightRAG}(\\mathcal{T})\\) \\(\\triangleright\\) Generate graph via LightRAG" + }, + { + "type": "text", + "bbox": [ + 0.526, + 0.189, + 0.905, + 0.204 + ], + "angle": 0, + "content": "3: \\((\\mathcal{E},\\mathcal{R})\\gets f_{\\mathrm{ERE}}(\\mathcal{T})\\) \\(\\triangleright\\) Extract entities and relations" + }, + { + "type": "text", + "bbox": [ + 0.525, + 0.205, + 0.808, + 0.22 + ], + "angle": 0, + "content": "4: return \\(\\mathcal{G} = \\{(h,r,t)\\mid h,t\\in \\mathcal{E},r\\in \\mathcal{R}\\}\\)" + }, + { + "type": "list", + "bbox": [ + 0.525, + 0.159, + 0.905, + 0.22 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.25, + 0.907, + 0.492 + ], + "angle": 0, + "content": "LightRAG is an excellent project that effectively supports automatic MMKG construction, and its retrieval design plays a central role in our framework. Specifically, LightRAG introduces keyword-guided text chunking to expand the retrievable context.
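Concretely, Algorithm 1 amounts to a thin wrapper around LightRAG's Python interface. The following is a minimal sketch that assumes LightRAG's documented insert/query API; the function names, the working directory, and the bare-bones initialization are illustrative, and the exact constructor arguments (LLM and embedding functions) vary across LightRAG versions.

```python
# Minimal sketch of Algorithm 1 (MMKG generation), assuming LightRAG's
# documented insert()/query() interface. Constructor arguments (LLM and
# embedding functions) vary across versions and are omitted here.
from lightrag import LightRAG, QueryParam

def build_mmkg(refined_description: str, external_knowledge: str = "") -> LightRAG:
    """Lines 1-3 of Algorithm 1: concatenate, then extract G = (E, R)."""
    rag = LightRAG(working_dir="./mmkg_store")  # illustrative minimal setup
    text = refined_description        # T <- S_hat
    if external_knowledge:            # optionally T <- S_hat (+) T_ext
        text += "\n" + external_knowledge
    # insert() performs the LLM-driven entity/relation extraction (f_ERE)
    # and persists the resulting graph under working_dir.
    rag.insert(text)
    return rag

def query_mmkg(rag: LightRAG, question: str) -> str:
    # Hybrid mode balances local, context-dependent cues with global
    # graph knowledge, matching the retrieval strategy adopted above.
    return rag.query(question, param=QueryParam(mode="hybrid"))
```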
By leveraging both high-level and low-level keywords in combination with chunk-level vector retrieval, it enables more comprehensive knowledge access. In addition, the choice of retrieval model is also important: larger LLMs retrieve more slowly but perform better. In this experiment, we used Qwen2.5-7B for retrieval. We also tested the retrieval performance of 32B and 72B models, which showed a \\(1\\%-5\\%\\) improvement in performance but also significantly increased graph construction time. Therefore, we finally adopted a lightweight retrieval model. The overall generation procedure built on LightRAG is summarized in Algorithm 1." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.505, + 0.836, + 0.523 + ], + "angle": 0, + "content": "E. Selection of Sensitivity Threshold \\(\\tau\\)" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.53, + 0.906, + 0.636 + ], + "angle": 0, + "content": "We select the sensitivity threshold \\(\\tau\\) empirically based on performance on the validation set. In practice, \\(\\tau\\) can be approximately determined by observing the token length distribution of captions: datasets with richer visual content and longer captions tend to benefit from a lower \\(\\tau\\), while simpler datasets can tolerate a higher \\(\\tau\\). This provides a practical way to adjust \\(\\tau\\) without extensive tuning." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.636, + 0.906, + 0.758 + ], + "angle": 0, + "content": "In addition, we notice a key pattern when analyzing the relevance scores across windows. Around certain values of \\(\\tau\\), the scores tend to cluster tightly on both sides of the threshold. As a result, even a small change in \\(\\tau\\) near these points can lead to a large change in the number of tokens being pruned. This indicates that the pruning process is especially sensitive around those points, and adjusting \\(\\tau\\) even slightly can have a large impact on the final token budget." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.77, + 0.824, + 0.788 + ], + "angle": 0, + "content": "F. Construction Cost and Scalability" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.795, + 0.906, + 0.902 + ], + "angle": 0, + "content": "Construction cost is a complex issue, which we analyze from the perspectives of time and hardware requirements. Time-wise, the main components are CoE and LightRAG. While using APIs can significantly speed up the process, offline deployment and inference are also feasible. For example, generating descriptions with Qwen2-VL-7B achieves around 60 tokens per second, processing one image ev
+ }, + { + "type": "title", + "bbox": [ + 0.095, + 0.269, + 0.481, + 0.303 + ], + "angle": 0, + "content": "G. Discussion on VLM Usage and Design Flexibility" + }, + { + "type": "text", + "bbox": [ + 0.094, + 0.312, + 0.482, + 0.462 + ], + "angle": 0, + "content": "Our observations on the number and type of VLMs used in CoE are consistent with the original conclusions drawn in the CoE paper [74]. Regardless of the specific VLM architecture, increasing the number of models \\(N\\) consistently improves performance up to a saturation point, after which further scaling yields diminishing returns. Moreover, we find that convergence is achieved more quickly when using lower softmax temperatures or simpler datasets. These factors reduce the ambiguity in model disagreement, allowing consensus to form more rapidly among the ensemble." + }, + { + "type": "text", + "bbox": [ + 0.094, + 0.464, + 0.482, + 0.583 + ], + "angle": 0, + "content": "Interestingly, our results also show that using a single, strong VLM can achieve performance comparable to a cascade of smaller, lightweight models. This suggests a practical trade-off between model strength and ensemble size—while ensembling helps in reaching consensus across diverse weak learners, a single high-capacity model may suffice in many scenarios, especially when computational resources are limited." + }, + { + "type": "text", + "bbox": [ + 0.094, + 0.585, + 0.482, + 0.795 + ], + "angle": 0, + "content": "In the original CoE method, the outputs from all VLM experts are first aggregated together, and then a selection process determines which expert descriptions to use. To save time in constructing the MMKGs with LLMs, we instead adopted a sequential strategy where the output of one expert is used as the prompt input for the next. We also evaluated the original aggregation and selection strategy on a smaller-scale dataset and found it to perform well, sometimes even surpassing the sequential approach. This confirms that CoE's original design of aggregating all experts' outputs before selecting which descriptions to use is effective and remains a strong baseline. However, correspondingly, using LLMs to construct MMKGs based on these aggregated descriptions requires significantly more time." + }, + { + "type": "text", + "bbox": [ + 0.095, + 0.797, + 0.482, + 0.9 + ], + "angle": 0, + "content": "Additionally, while we apply pruning only at the final description step, pruning during intermediate steps may also yield good results depending on the dataset and task. There is no fixed rule for when or how to apply pruning, and our framework is designed to be flexible enough to accommodate different strategies. We emphasize that both our CoE framework and the SV step are intended to be adaptable, al" + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.093, + 0.905, + 0.12 + ], + "angle": 0, + "content": "lowing users to experiment freely and select the approach that best suits their needs." + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.123, + 0.905, + 0.197 + ], + "angle": 0, + "content": "There are various VLMs that can be used for pruning. Among them, we recommend CLIP due to its fast inference speed and pruning performance comparable to other VLMs. Given its efficiency and effectiveness, CLIP serves as a practical choice for pruning in many scenarios." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.495, + 0.926, + 0.504, + 0.936 + ], + "angle": 0, + "content": "3" + } + ] +] \ No newline at end of file diff --git a/data/2025/2503_12xxx/2503.12972/66f6c6f6-89fc-4fc3-8936-54da38b8d574_origin.pdf b/data/2025/2503_12xxx/2503.12972/66f6c6f6-89fc-4fc3-8936-54da38b8d574_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..8874cce2bbd2ab4868b7d0ad2f9468b77edda8dd --- /dev/null +++ b/data/2025/2503_12xxx/2503.12972/66f6c6f6-89fc-4fc3-8936-54da38b8d574_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2880e854ec0d38216909e5a0916960fcb0a5a7141b905312d2d76908927df4a3 +size 9468399 diff --git a/data/2025/2503_12xxx/2503.12972/full.md b/data/2025/2503_12xxx/2503.12972/full.md new file mode 100644 index 0000000000000000000000000000000000000000..b739f3ff6c07332a50e8148396c6496e74989fea --- /dev/null +++ b/data/2025/2503_12xxx/2503.12972/full.md @@ -0,0 +1,455 @@ +# Aligning Vision to Language: Annotation-Free Multimodal Knowledge Graph Construction for Enhanced LLMs Reasoning + +Junming Liu $^{1,2}$ , Siyuan Meng $^{2,3}$ , Yanting Gao $^{1}$ , Song Mao $^{2}$ , Pinlong Cai $^{2}$ , + +Guohang Yan $^{2}$ , Yirong Chen $^{2,4}$ , Zilin Bian $^{5}$ , Ding Wang $^{2*}$ , Botian Shi $^{2}$ + +$^{1}$ Tongji University $^{2}$ Shanghai Artificial Intelligence Laboratory + +$^{3}$ East China Normal University $^{4}$ Stanford University $^{5}$ New York University + +liu_junming6917@tongji.edu.cn wangding@pjlab.org.cn + +# Abstract + +Multimodal reasoning in Large Language Models (LLMs) struggles with incomplete knowledge and hallucination artifacts, challenges that textual Knowledge Graphs (KGs) only partially mitigate due to their modality isolation. While Multimodal Knowledge Graphs (MMKGs) promise enhanced cross-modal understanding, their practical construction is impeded by semantic narrowness of manual text annotations and inherent noise in visual-semantic entity linkages. In this paper, we propose Vision-align-to-Language integrated Knowledge Graph (VaLiK), a novel approach for constructing MMKGs that enhances LLMs reasoning through cross-modal information supplementation. Specifically, we cascade pre-trained Vision-Language Models (VLMs) to align image features with text, transforming them into descriptions that encapsulate image-specific information. Furthermore, we developed a cross-modal similarity verification mechanism to quantify semantic consistency, effectively filtering out noise introduced during feature alignment. Even without manually annotated image captions, the refined descriptions alone suffice to construct the MMKG. Compared to conventional MMKGs construction paradigms, our approach achieves substantial storage efficiency gains while maintaining direct entity-to-image linkage capability. Experimental results on multimodal reasoning tasks demonstrate that LLMs augmented with VaLiK outperform previous state-of-the-art models. Our code is published at https://github.com/Wings-Of-Disaster/VaLiK. + +# 1. Introduction + +Recent advancements in Large Language Models (LLMs) [2, 10, 26, 66] have demonstrated their superiority and versatility across various Natural Language Reasoning (NLR) tasks [9, 44, 54, 59]. To enhance LLMs into the + +![](images/4f0401946432d33d9c1cf582ceba501170762e9bc5154f160d35a6b7809d9e45.jpg) +Figure 1. (a) Training entity extraction models relies on extensive fine-grained annotations, increasing labeling costs. More examples are provided in Appendix B. 
(b) Capturing implicit semantic associations demands abstract comprehension or logical inference. + +realm of multimodal reasoning, researchers [65, 72, 75, 80] have endeavored to equip these models with multimodal capabilities, as evidenced by advancements in Multimodal Large Language Models (MLLMs) such as BLIP-2 [41], GPT-4o [33], Janus-Pro [14], among others. Despite their notable progress, these models often experience hallucinations [5, 35], primarily arising from knowledge deficiencies due to incomplete or obsolete information. + +Fine-tuning LLMs demands prohibitive computational costs [32]. While text-based Knowledge Graphs (KGs) have partially addressed this limitation through efficient real-time updates [6, 63, 73], they are still restricted by modal isolation, which hinders cross-modal reasoning, as detailed in Appendix A. To bridge this semantic fragmentation, Multimodal Knowledge Graphs (MMKGs) have been developed as unified representational frameworks [11, 34, 39, 46]. + +However, constructing robust MMKGs faces two primary obstacles [16, 90]. First, the lack of large-scale fine-grained entity-image corpora makes it infeasible to train high-quality entity extractors, significantly constraining scalability, as illustrated in Figure 1a. Second, conventional visual relation detectors primarily identify superficial spatial interactions instead of semantic relations consistent with KGs, while frequently hallucinating implausible connections that corrupt graph integrity, as shown in Figure 1b. + +In this paper, we propose VaLiK, short for Vision-align-to-Language integrated Knowledge Graph, a novel framework designed to empower LLMs with advanced multimodal reasoning. Unlike traditional methods that rely on text annotations both to train extraction models and to drive the knowledge construction process [55], VaLiK adopts an annotation-free approach to MMKG construction. Specifically, we first employ several pretrained Vision-Language Models (VLMs), designed based on Chain-of-Experts (CoE) principles [74], to convert visual inputs into image-specific textual descriptions through cross-modal feature alignment. This procedure eliminates the need for manually annotated image captions in both the knowledge extraction and construction phases while preserving visual details typically missing in generic text descriptions. Moreover, in contrast to existing relation detection methods that require predefined label taxonomies [17, 61, 82, 85], VaLiK excels at extracting profound semantic relationships that are both KG-compatible and capable of capturing novel associations beyond training supervision. While VLMs enable cross-modal reasoning and interpretation, they introduce spurious relational noise through hallucinated inter-modal attributions, as depicted in Figure 2. We address this limitation through cross-modal similarity recalibration, strategically filtering inconsistent information while preserving valid semantic correspondences. Finally, the purified descriptions are systematically organized into MMKGs via LLM-driven symbolic structuring [28], bridging visual and textual domains with factual consistency. + +To thoroughly evaluate the VaLiK method, we conduct a comprehensive assessment across two critical multimodal benchmarks: multimodal classification (tested on the CrisisMMD dataset [3]) and multimodal question answering (evaluated via the ScienceQA benchmark [48]). The experiments span diverse LLM architectures and MMKG construction techniques to ensure the framework's robustness.
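Viewed end to end, the pipeline described above reduces to three composable stages. The schematic sketch below uses hypothetical stand-in functions (describe_with_coe, verify_similarity, structure_with_llm) rather than VaLiK's actual API; the stubs exist only to make the control flow explicit.

```python
# Schematic sketch of the three-stage pipeline; the stage functions are
# hypothetical stand-ins, with stubs so the flow runs end to end.
def describe_with_coe(image_path: str) -> str:
    # Stage 1: cascaded VLM experts align visual features to language,
    # yielding an image-specific textual description.
    return f"generated description of {image_path}"  # stub

def verify_similarity(image_path: str, description: str, tau: float = 0.25) -> str:
    # Stage 2: cross-modal similarity recalibration prunes segments whose
    # image-text relevance falls below the threshold tau.
    return description  # stub

def structure_with_llm(description: str, external_text: str = "") -> dict:
    # Stage 3: LLM-driven symbolic structuring into MMKG triples.
    return {"entities": [], "relations": [], "source": description + external_text}

mmkg = structure_with_llm(verify_similarity("img.jpg", describe_with_coe("img.jpg")))
```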
+

![](images/0565a1818d28d5c065a7ad32bab646f9d5e0994ab7c5dbfd84d22b45087b7151.jpg)
Figure 2. Feature-aligned descriptions from VLMs introduce redundant and inaccurate relationship patterns.

The experimental results demonstrate that the MMKGs constructed by VaLiK achieve superior multimodal reasoning performance in LLMs while requiring substantially less storage than conventional approaches. More importantly, the proposed approach retains direct entity-to-image linkage capabilities even with the compressed graph structure.

In summary, VaLiK is the first framework that enables end-to-end, annotation-free, zero-shot, and storage-efficient multimodal knowledge construction with high adaptability and scalability. Our key contributions include:

- To the best of our knowledge, VaLiK is the first end-to-end framework to build annotation-free MMKGs to improve LLMs' multimodal reasoning capabilities, effectively eliminating the need for manually annotated textual material and enabling a completely autonomous multimodal knowledge generation process.
- We offer an innovative zero-shot method for constructing MMKGs that captures deep semantic connections beyond traditional predetermined labels, with an effective verification system that guarantees the accuracy of these relationships. The knowledge distillation paradigm greatly decreases storage while maintaining semantic integrity.
- We develop a highly modular and extensible architecture that allows VaLiK to effortlessly incorporate new models and workflows for specialized domain tasks, facilitating rapid adaptation to diverse application scenarios without incurring expensive system changes.

# 2. Related Work

# 2.1. Multimodal Knowledge Graphs

The principal advantage of MMKGs resides in their multimodal integration beyond conventional KGs. By linking entities with corresponding visual or textual data, MMKGs introduce valuable visual and textual information to the knowledge base, substantially advancing multimodal reasoning capabilities. This combination addresses core challenges in tasks that inherently demand multimodal synergy, like autonomous driving [27, 29], image-text retrieval [24, 87], and robotic manipulation [52, 58]. However, constructing trustworthy MMKGs with minimal manual effort remains a critical challenge. Recent studies have proposed innovative strategies to enhance MMKG reliability and utility. For instance, Chen et al. [13] proposed MSPT, a framework addressing continual MMKG construction through gradient modulation for balanced multimodal learning and attention distillation to mitigate catastrophic forgetting. Song et al. [61] developed Scene-MMKG, integrating knowledge engineering with large language models to improve robotic manipulation by resolving data sparsity and knowledge uncertainty. Wang et al. [70] introduced TIVA-KG, the first quad-modal knowledge graph spanning text, image, video, and audio with triplet grounding, empirically validating its effectiveness in downstream tasks. While these advances enhance multimodal reasoning capabilities, their efficacy remains rooted in resource-intensive paradigms, requiring extensively annotated datasets for knowledge acquisition.

# 2.2. Knowledge-Augmented Multimodal Learning

Multimodal learning has seen significant progress in aligning and integrating information across different data modalities [7, 45, 76].
The incorporation of structured knowledge through MMKGs further enhances these approaches, improving the reasoning capabilities and generalization across a variety of domains, such as visual question answering [51, 60, 68], recommendation systems [18, 62, 71], and classification [31, 56, 84]. Methods like GraphAdapter's dual-KG adaptation [42] and contrastive multi-relational encoding with KGs [23] inject external knowledge into models, refining their performance and improving their capability to handle complex tasks. Additionally, Lee et al. [39] proposed MR-MKG, a novel framework that constructs task-specific MMKGs to enhance multimodal reasoning in LLMs. These knowledge-augmented paradigms demonstrate superior cross-modal semantic grounding compared to unimodal approaches [15, 36]. However, their reliance on preconstructed MMKGs often leads to domain discrepancies, where generic knowledge schemas misalign with task-specific reasoning patterns, ultimately limiting contextual precision in target applications.

# 2.3. Multimodal Large Language Models

The limitations of text-only LLMs in meeting increasingly complex demands have spurred extensive research [79, 83, 86] into developing LLMs capable of effectively processing and reasoning over multimodal inputs. Current research predominantly employs adapter or projection layers to connect the embedding spaces of various modality-specific encoders with the textual embedding space of LLMs [39]. For instance, foundational models like CLIP [57] and BLIP [40] pioneered cross-modal alignment by jointly training vision and text encoders to map images and text into a shared embedding space. Building on this, LLaVA [43] and Flamingo [4] advanced the field by integrating visual encoders with LLMs, enabling more nuanced multimodal understanding and generation. More recently, Gemini [64], Qwen2-VL [69], and GPT-4o [33] have further pushed the boundaries by scaling up multimodal pretraining and introducing sophisticated mechanisms for cross-modal interaction. However, multimodal LLMs remain prone to hallucinations. While they enhance cross-modal alignment, they neither acquire new knowledge nor avoid introducing noise through integration. To address these limitations, VaLiK "uses the master's tools to refine the master's craft," first constructing MMKGs via MLLMs and then leveraging them to enhance MLLMs' reasoning capabilities.

# 3. Method

In this section, we present the technical details of VaLiK. VaLiK introduces a novel expansion-reduction paradigm for visual knowledge extraction. The architecture initially organizes several VLMs with distinct knowledge domains, designed based on CoE principles [74], to produce comprehensive textual descriptions encompassing hierarchical visual details. A cross-modal similarity verification mechanism then iteratively filters out noisy tokens through cross-modal alignment while preserving semantically salient elements. This optimization-style approach eliminates external textual dependencies while enabling effective MMKG construction. VaLiK's framework is shown in Figure 3.

# 3.1. CoE-based Visual to Language Modeling

Recent entity detection techniques [20, 81, 91] have been widely adopted for entity and relation extraction in MMKG construction. However, these methods are inherently limited by predefined categorical boundaries, lacking the capacity to recognize visual concepts outside their training vocabulary.
In contrast, VLMs pretrained on web-scale corpora [12, 41, 89] exhibit broader recognition capabilities through exposure to diverse visual concepts.

We therefore leverage pretrained VLMs to extract comprehensive visual information. This process removes the necessity for the detailed fine-grained data typically required to train specialized recognition models. The generalized vision-to-language conversion pipeline can be formalized as:

$$
S = \mathcal{D}_{\text{text}}\left(\mathcal{A}\left(\mathcal{E}_{\text{vis}}(I)\right)\right), \tag{1}
$$

where $I$ denotes the input image, $\mathcal{E}_{\text{vis}}$ denotes the visual encoder extracting visual features, $\mathcal{A}$ carries out cross-modal feature alignment and interaction, and $\mathcal{D}_{\text{text}}$ generates textual tokens through autoregressive decoding. The resulting visual description $S = \{w_{1}, \dots, w_{n}\}$ emerges from this multi-stage processing.

![](images/3ae9a45583be9946e86dc7de188a71f381a9113b43215237e1f028a2f67cfac2.jpg)
Figure 3. The pipeline of VaLiK: First, large-scale visual descriptions are generated using CoE-based VLMs. Then, a similarity verification mechanism is used to prune irrelevant information. Finally, MMKGs are constructed using LLMs based on LightRAG. The constructed MMKGs can assist LLMs in multimodal reasoning, alleviating the hallucination issues caused by incomplete knowledge.

However, quantitative analysis uncovers considerable discrepancies between machine-generated and human-annotated descriptions [88]. As an illustration, when using BLIP-2 [41] to generate sample captions, we noted that the model outputs are markedly concise and devoid of visual specifics, as detailed in Appendix C. To bridge this gap, we implement CoE-enhanced generation through cascaded VLM processing. At iteration step $t$, each expert $E_{i}$ receives both the original visual signals $I$ and the contextual output from the preceding expert $E_{i-1}$:

$$
S_{i}^{(t)} = E_{i}\left(I, S_{i-1}^{(t-1)}\right), \tag{2}
$$

where $S_{i-1}^{(t-1)}$ denotes the description from expert $E_{i-1}$ at step $t-1$, with $S_{0}^{(t)} \coloneqq \emptyset$ for initialization.

Specifically, each expert $E_{i}$ implements a unified visual-language processing task:

# 1. Visual Feature Extraction:

$$
\mathbf{V}_{i} = \operatorname{Enc}_{\text{vis}}^{i}(I) \in \mathbb{R}^{d_{v} \times N_{p}}, \tag{3}
$$

where $\operatorname{Enc}_{\text{vis}}^{i}$ denotes an established visual encoder [21, 30, 47] producing $N_{p}$ patch embeddings with dimension $d_{v}$.

# 2. Cross-Modal Interaction and Generation:

VLMs integrate pretrained learnable query embeddings $\mathbf{Q}_{i} \in \mathbb{R}^{d_{q} \times L_{q}}$ to interact with visual features $\mathbf{V}_{i} \in \mathbb{R}^{d_{v} \times N_{p}}$ via cross-attention [67]:

$$
\mathbf{H}_{i} = \operatorname{CrossAttn}\left(\mathbf{Q}_{i}, \mathbf{V}_{i}\right) = \operatorname{softmax}\left(\frac{\mathbf{Q}_{i}\mathbf{W}_{q}^{i}\left(\mathbf{V}_{i}\mathbf{W}_{k}^{i}\right)^{\top}}{\sqrt{d_{k}}}\right)\mathbf{V}_{i}\mathbf{W}_{v}^{i}, \tag{4}
$$

where $\mathbf{W}_{q}^{i} \in \mathbb{R}^{d_{q} \times d_{k}}$, $\mathbf{W}_{k}^{i}, \mathbf{W}_{v}^{i} \in \mathbb{R}^{d_{v} \times d_{k}}$, and $L_{q}$ denotes the predefined query length.
Cross-attention serves as a prevalent approach, while other interaction strategies coexist [4]. The adopted VLMs in our implementation primarily rely on this approach for modality fusion.

# 3. Text Generation:

The text encoder $\operatorname{Enc}_{\text{text}}^{i}$ first processes the preceding expert's output $S_{i-1}^{(t-1)}$ into latent features:

$$
\mathbf{P}_{i} = \operatorname{Enc}_{\text{text}}^{i}\left(S_{i-1}^{(t-1)}\right) \in \mathbb{R}^{d_{t} \times L}. \tag{5}
$$

Subsequently, the text decoder $\operatorname{Dec}_{\text{text}}^{i}$ synthesizes the final output $S_{i}^{(t)}$ by jointly conditioning on $\mathbf{P}_{i}$ and $\mathbf{H}_{i}$:

$$
S_{i}^{(t)} = \operatorname{Dec}_{\text{text}}^{i}\left(\mathbf{P}_{i}, \mathbf{H}_{i}\right) = \left\{w_{1}^{(t, i)}, \dots, w_{m}^{(t, i)}\right\}. \tag{6}
$$

Ultimately, the final textual description $S_{N}^{(C)}$ is obtained after $C$ iteration steps through $N$ cascaded experts.

# 3.2. Cross-Modal Similarity Verification

To address noise in VLM-generated captions, we design a sliding window mechanism with semantic consistency verification. This method ensures that only relevant and semantically consistent segments are retained in the final description. Let $W_{k}$ denote the $k$-th window containing $m$ consecutive tokens $\{w_{km+1}, \dots, w_{(k+1)m}\}$. For each window, we compute its cross-modal similarity score:

$$
\alpha_{k} = \frac{\operatorname{Enc}_{\text{vis}}(I) \cdot \operatorname{Enc}_{\text{text}}(W_{k})}{\left\|\operatorname{Enc}_{\text{vis}}(I)\right\| \left\|\operatorname{Enc}_{\text{text}}(W_{k})\right\|}, \tag{7}
$$

where $\operatorname{Enc}_{\text{vis/text}}(\cdot)$ adopts lightweight CLIP [57] encoders with frozen parameters for efficient processing. The similarity score $\alpha_{k}$ lies within the range [0, 1], with higher values indicating stronger alignment between the visual and textual information.

After calculating the cross-modal similarity for each window, we employ an empirical threshold $\tau$ to filter out low-similarity windows. This threshold helps to identify and discard noisy or irrelevant sections of the generated caption that do not align well with the visual content, thereby reducing the impact of inaccurate or misleading descriptions. Formally, for each window $W_{k}$, if $\alpha_{k} < \tau$, the window is discarded as noise. This process effectively prunes windows with low similarity scores, ensuring that only semantically meaningful segments remain. The final denoised description $\hat{S}$ is obtained by concatenating all windows $W_{k}$ for which $\alpha_{k} \geq \tau$:

$$
\hat{S} = \bigcup_{\alpha_{k} \geq \tau} W_{k}. \tag{8}
$$

Our window size $m$ is flexibly determined and generally adapts dynamically to natural sentence segmentation.
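To make the expand-then-reduce procedure concrete, the following is a minimal Python sketch of the cascade in Eq. (2) and the window pruning of Eqs. (7)-(8). The experts are assumed to be generic captioning callables (the BLIP-2/LLaVA/Qwen2-VL chain described in Section 4.1 would slot in here), scoring uses a frozen CLIP ViT-L/14 via the `transformers` library, and sentence-level windows follow the dynamic segmentation described above; treat it as an illustration rather than the exact implementation.

```python
# Minimal sketch of the expand-then-reduce step. Each "expert" is assumed
# to be a callable (image, prior_text) -> text; the real VLM chain is
# abstracted away here.
import re
import torch
from PIL import Image
from transformers import CLIPModel, CLIPProcessor

clip = CLIPModel.from_pretrained("openai/clip-vit-large-patch14").eval()
proc = CLIPProcessor.from_pretrained("openai/clip-vit-large-patch14")

def cascade_describe(image: Image.Image, experts, steps: int = 1) -> str:
    """Eq. (2): each expert refines the previous expert's description."""
    desc = ""  # S_0 := empty
    for _ in range(steps):          # C iteration steps
        for expert in experts:      # N cascaded experts
            desc = expert(image, desc)
    return desc

def prune_description(image: Image.Image, desc: str, tau: float = 0.25) -> str:
    """Eqs. (7)-(8): keep sentence windows whose CLIP cosine similarity
    with the image is at least tau; drop the rest as noise."""
    windows = [s.strip() for s in re.split(r"(?<=[.!?])\s+", desc) if s.strip()]
    if not windows:
        return desc
    with torch.no_grad():
        img = clip.get_image_features(**proc(images=image, return_tensors="pt"))
        txt = clip.get_text_features(**proc(
            text=windows, return_tensors="pt", padding=True, truncation=True))
    img = img / img.norm(dim=-1, keepdim=True)  # unit-normalize both sides so
    txt = txt / txt.norm(dim=-1, keepdim=True)  # the dot product equals alpha_k
    alphas = (txt @ img.T).squeeze(-1)
    return " ".join(w for w, a in zip(windows, alphas.tolist()) if a >= tau)
```

The default $\tau = 0.25$ mirrors the CrisisMMD setting reported in Section 4.1; sentence-sized windows keep the pruning syntactically coherent.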
+

# 3.3. MMKG Construction for Enhanced Reasoning

LLMs have become increasingly popular for identifying entities, relationships, and attributes within a corpus, which are then organized into a KG. The strength of LLM-based KG generation lies in its capacity to leverage the vast amount of knowledge encoded within these models, allowing them to detect complex and nuanced patterns across diverse data sources. This approach eliminates the need for manual annotation, enabling a highly scalable and domain-adaptive process suitable for a wide range of applications.

We begin by refining the generated textual description $\hat{S}$ (VLM-based information), which is then optionally concatenated with any available external textual knowledge $T$ to form the input for KG generation. This combined input is used to generate MMKGs with the help of an LLM [22, 28], leveraging its capacity for multi-hop reasoning and dynamic knowledge integration:

$$
\mathcal{G} = \operatorname{LLM}(\hat{S} \oplus T), \tag{9}
$$

where $\oplus$ denotes optional concatenation based on the availability of $T$. The resulting graph $\mathcal{G}$ captures both visual and textual relationships inferred by the LLM.

We define $\mathcal{G}$ as a set of triplets:

$$
\mathcal{G} = \{(h, r, t) \mid h, t \in \mathcal{E}, r \in \mathcal{R}\}, \tag{10}
$$

where $\mathcal{E}$ and $\mathcal{R}$ denote the sets of entities and relations. Entities include objects or concepts from the image or external text, while relations describe connections such as "is a type of," "part of," or "has property." Each triplet $(h, r, t)$ links a head entity $h$ and a tail entity $t$ via relation $r$.

Multimodal Reasoning Enhancement. To support multimodal reasoning, we retrieve relevant triplets from $\mathcal{G}$ through structural patterns during LLM inference:

$$
\mathcal{G}_{q} = \operatorname{Retrieve}(q, \mathcal{G}), \tag{11}
$$

where $\operatorname{Retrieve}(\cdot)$ denotes a retrieval strategy that identifies subgraphs relevant to the query for reasoning. Detailed retrieval strategies are described in Appendix D.

The augmented prompt integrates multimodal evidence:

$$
p_{\mathrm{aug}} = q \,\Big\|\, \bigcup_{(h, r, t) \in \mathcal{G}_{q}} \left([h] \rightarrow r \rightarrow [t]\right). \tag{12}
$$

Note that we incorporate the storage locations of images in the database during MMKG construction, enabling the MMKGs to link to visual data. VaLiK enables text-only LLMs to perform multimodal reasoning through $\mathcal{G}$'s visual associations, while VLMs refresh knowledge representations by jointly injecting both visual and textual information, significantly mitigating hallucination risks.
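As a toy illustration of Eqs. (10)-(12), the sketch below stores triplets as plain tuples, retrieves by entity mention with optional one-hop expansion, and linearizes the retrieved evidence into the augmented prompt. The string-matching `retrieve` and all names are hypothetical simplifications; the actual system delegates retrieval to LightRAG (Appendix D).

```python
# Toy sketch of Eqs. (10)-(12); naive entity matching stands in for the
# LightRAG-based retrieval used in the real system.
from typing import List, Set, Tuple

Triplet = Tuple[str, str, str]  # (head h, relation r, tail t)

def retrieve(query: str, graph: Set[Triplet], hops: int = 1) -> List[Triplet]:
    """Eq. (11): collect triplets whose entities are mentioned in the query,
    then expand along shared entities for a fixed number of hops."""
    q = query.lower()
    hits = {t for t in graph if t[0].lower() in q or t[2].lower() in q}
    for _ in range(hops):
        entities = {e for h, _, t in hits for e in (h, t)}
        hits |= {t for t in graph if t[0] in entities or t[2] in entities}
    return sorted(hits)

def augment_prompt(query: str, evidence: List[Triplet]) -> str:
    """Eq. (12): append linearized [h] -> r -> [t] facts to the query."""
    facts = "\n".join(f"[{h}] -> {r} -> [{t}]" for h, r, t in evidence)
    return f"{query}\nKnowledge graph evidence:\n{facts}"

graph = {
    ("Arizona", "is a type of", "US state"),
    ("Arizona", "borders", "Mexico"),
    ("map.png", "depicts", "Arizona"),  # entity-to-image linkage via file path
}
q = "Which country borders Arizona?"
print(augment_prompt(q, retrieve(q, graph)))
```

Storing image paths as graph entities, as in the last triplet, is what preserves the direct entity-to-image linkage noted above.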
# 4. Experiment

# 4.1. Setups

Evaluation Datasets. We evaluate VaLiK on two multimodal reasoning benchmarks with distinct characteristics:

- CrisisMMD [3]. This real-world disaster response dataset includes around 35,000 noisy social media postings with paired images and text, each annotated for seven catastrophe categories and four severity levels. Its realistic user-generated content with natural noise and implicit modality correlations provides a rigorous testbed for zero-shot adaptation, with good performance indicating practical relevance in real-world crisis scenarios.
- ScienceQA [48]. This dataset contains 21,208 multimodal science questions combining textual and visual contexts, with $48.7\%$ of instances containing images. Questions span physics, chemistry, and biology domains, requiring cross-modal reasoning between textual concepts and visual diagrams. Additionally, ScienceQA offers image captions to aid text-only LLMs in reasoning, allowing a comparison of unimodal approaches.

Task Formulation. For CrisisMMD, we define three multimodal classification tasks: (1) binary information relevance filtering, (2) fine-grained humanitarian category recognition, and (3) a consolidated taxonomy with merged categories to reduce label complexity. We omit the unimodal damage assessment task to focus on multimodal aspects. For ScienceQA, we follow the original evaluation protocol, reporting accuracy broken down by question type, contextual modality, and educational stage.

Baselines. We conduct a comprehensive evaluation of text-only LLMs, multimodal VLMs, and KGs that enhance LLMs in multimodal reasoning.

- For CrisisMMD, we compare text-only LLMs using few-shot prompting (LLaMA-2 [66], GPT-4 [2],
| Task | LLaMA-2 7B | LLaMA-2 13B | LLaMA-2 70B | GPT-4 | DeepSeek-R1 7B | DeepSeek-R1 8B | DeepSeek-R1 32B | DeepSeek-R1 70B | Qwen2.5 7B | Qwen2.5 32B | Qwen2.5 72B | LightRAG (Text-only) | VaLiK (Image-only) | VaLiK (Text-Image) |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| Task 1 | 62.32 | 63.80 | 63.15 | 66.83 | 67.23 | 63.31 | 63.61 | 65.53 | 65.04 | 67.28 | 67.95 | 67.49 | 69.52 | 68.90 |
| Task 2 | 18.32 | 21.82 | 28.87 | 47.25 | 26.53 | 25.49 | 24.77 | 21.05 | 44.52 | 46.94 | 50.51 | 45.11 | 49.54 | 50.02 |
| Task 2 Merged | 21.45 | 33.15 | 36.89 | 49.44 | 25.85 | 23.56 | 21.55 | 25.57 | 45.33 | 47.07 | 50.29 | 45.94 | 49.07 | 50.69 |
+ +Table 1. The performance evaluation of text-only LLMs using few-shot prompting without any fine-tuning on the training set. As these models handle text only, test data is formatted as unimodal text for compatibility. In our implementations, both LightRAG and VaLiK adopt Qwen2.5-7B as the base reasoning model. Bold indicates the highest value, and underline indicates the second highest. + +
| Task | CLIP ViT-L/14 | LLaVA 7B | LLaVA 13B | LLaVA 34B | BLIP-2 Flan-T5-XL | BLIP-2 OPT | GPT-4o | Qwen2-VL 2B-I | Qwen2-VL 7B-I | Qwen2-VL 72B-I | VaLiK (*) | VaLiK (#) | VaLiK (+) | VaLiK (~) |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| Task 1 | 43.36 | 54.00 | 60.58 | 56.44 | 61.29 | 38.62 | 68.20 | 47.56 | 62.45 | 65.80 | 60.78 | 68.44 | 61.11 | 68.89 |
| Task 2 | 17.88 | 28.01 | 20.14 | 25.15 | 40.86 | 14.26 | 47.58 | 7.60 | 32.68 | 47.21 | 25.80 | 48.88 | 27.23 | 49.78 |
| Task 2-M | 20.79 | 30.61 | 23.44 | 25.07 | 40.72 | 14.27 | 49.55 | 7.42 | 34.20 | 48.28 | 27.31 | 49.27 | 29.09 | 49.31 |
+

Table 2. The performance of multimodal VLMs and KG-enhanced LLMs. The -I suffix denotes instruction-tuned variants. Symbol markers denote KG types and models: the asterisk (*) represents the image-only KG with LLaVA-34B, the hash (#) indicates the image-only KG using Qwen2-VL-72B-I, the plus (+) denotes the text-image KG with LLaVA-34B, and the tilde (~) shows the text-image KG using Qwen2-VL-72B-I.

DeepSeek-R1 [26], Qwen-2.5 [77]) and multimodal VLMs (CLIP [57], LLaVA [43], GPT-4o [33], Qwen2-VL [69], BLIP-2 [41]).

- For ScienceQA, we compare models for general domains in zero/few-shot settings, including text-only LLMs (GPT Model [48], CoT [48], DDCoT [86]), multimodal VLMs (LG-VQA [25], LaVIN [50], BLIP-2, CCOT [53], GraphVis [19]), and the Tool-LLM Chameleon [49]. These models are not specifically fine-tuned for scientific tasks, ensuring a fair evaluation of generalization capabilities.
- We further compare the multimodal reasoning performance of LLMs assisted by KGs, evaluating text-based KGs built with LightRAG [28] and pre-constructed MMKGs such as Visual Genome [38] and Mmkg [46].

Implementation. For MMKG construction, we design a chain of VLMs including BLIP-2, LLaVA, and Qwen2-VL, with CLIP-ViT-L/14 for pruning. Stronger or additional VLMs could be employed to enhance performance if more computational resources are available. We use the entire training set as the knowledge base and construct MMKGs from the extracted descriptions based on the LightRAG framework. In comparative experiments, the LightRAG method we evaluate utilizes only textual data, while VaLiK employs two configurations: (1) fully image-generated text descriptions (Image-only), and (2) original text combined with image-generated text (Text-Image). Dynamic window partitioning based on sentence length ensures syntactically coherent pruning results. Similarity thresholds are set to $\tau = 0.25$ for CrisisMMD and $\tau = 0.20$ for ScienceQA based on empirical evaluations to balance precision and recall; see Appendix E for selection details. We construct the graph using DeepSeek-R1-70B and implement LightRAG's hybrid retrieval approach with Qwen2.5-7B. For graph construction and multimodal reasoning, we utilize one NVIDIA A100-80GB GPU. Task-specific prompts are designed to assist LLMs in multimodal reasoning evaluation.

# 4.2. Main Results

Multimodal Classification Tasks. We conduct multimodal classification experiments on the CrisisMMD dataset, evaluating both text-only LLMs and multimodal VLMs. Detailed comparative results are provided in Tables 1 and 2. For text-only LLMs, we adopt Qwen2.5-7B as the foundational reasoning model. Remarkably, the VaLiK-enhanced version achieves state-of-the-art (SOTA) performance matching that of the native Qwen2.5-72B model. The image-only KG constructed through VaLiK demonstrates an average accuracy improvement of $4.41\%$ across tasks, with the text-image variant attaining a $4.90\%$ enhancement. These improvements significantly surpass the $1.22\%$ gain obtained by LightRAG using a textual KG. We further validate VaLiK's cross-scale applicability through evaluations on the Qwen2.5-32B and 72B architectures, observing consistent $2.0\% - 2.5\%$ improvements. While not as significant as the 7B model's benefits, this shows that models with substantial prior knowledge benefit less from external knowledge augmentation.

Unlike text-only LLMs that depend on MMKGs for visual understanding, VLMs benefit from KG integration primarily through the refreshing of outdated knowledge.
Due to the inherent availability of visual features during inference, VaLiK's performance gains for VLMs remain constrained compared to text-only counterparts.
| Method | #T-Param | NAT | SOC | LAN | TXT | IMG | NO | G1-6 | G7-12 | Average |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| Human [48] | - | 90.23 | 84.97 | 87.48 | 89.60 | 87.50 | 88.10 | 91.59 | 82.42 | 88.40 |
| GPT-4 [43] | - | 84.06 | 73.45 | 87.36 | 81.87 | 70.75 | 90.73 | 84.69 | 79.10 | 82.69 |
| CoT (GPT-3) [48] | 173B | 75.44 | 70.87 | 78.09 | 74.68 | 67.43 | 79.93 | 78.23 | 69.68 | 75.17 |
| CoT (UnifiedQA) [48] | 223M | 71.00 | 76.04 | 78.91 | 66.42 | 66.53 | 81.81 | 77.06 | 68.82 | 74.11 |
| CoT (GPT-4) [49] | 1T+ | 85.48 | 72.44 | 90.27 | 82.65 | 71.49 | 92.89 | 86.66 | 79.04 | 83.99 |
| DDCoT [86] | 175B | 80.15 | 76.72 | 82.82 | 78.89 | 72.53 | 85.02 | 82.86 | 75.21 | 80.15 |
| Chameleon (ChatGPT) [49] | 175B+ | 81.62 | 70.64 | 84.00 | 79.77 | 70.80 | 86.62 | 81.86 | 76.53 | 79.93 |
| LG-VQA (BLIP-2) [25] | - | - | - | - | - | - | - | - | - | 86.32 |
| LaVIN-13B [78] | - | - | - | - | - | - | - | - | - | 77.54 |
| BLIP-2 [78] | - | - | - | - | - | - | - | - | - | 74.17 |
| CCOT | 7B | - | - | - | - | - | - | - | - | 76.84 |
| GraphVis [19] | 7B | - | - | - | - | - | - | - | - | 73.18 |
| Qwen2.5-7B | 7B | 76.20 | 67.83 | 77.27 | 74.49 | 65.79 | 79.02 | 77.72 | 69.35 | 74.72 |
| Qwen2.5-72B | 72B | 79.64 | 67.10 | 84.90 | 77.56 | 65.00 | 87.93 | 80.25 | 74.85 | 78.37 |
| Qwen2.5-7B (Mmkg) [46] | 7B | 73.98 | 66.37 | 78.18 | 71.65 | 64.30 | 79.65 | 76.51 | 68.03 | 73.47 |
| Qwen2.5-7B (Visual Genome) [38] | 7B | 76.78 | 67.04 | 78.09 | 74.05 | 66.19 | 79.72 | 78.08 | 69.68 | 75.08 |
| Qwen2.5-7B (VaLiK Text-only) | 7B | 84.54 | 74.24 | 86.91 | 82.74 | 72.53 | 90.03 | 84.51 | 80.28 | 82.98 |
| Qwen2.5-7B (VaLiK Image-only) | 7B | 79.14 | 71.54 | 79.27 | 77.16 | 69.72 | 83.14 | 80.65 | 73.96 | 78.88 |
| Qwen2.5-7B (VaLiK Text-Image) | 7B | 84.15 | 75.14 | 87.64 | 82.99 | 73.18 | 89.69 | 84.40 | 80.95 | 83.16 |
| Qwen2.5-72B (VaLiK Text-Image) | 72B | 85.61 | 75.93 | 90.27 | 84.40 | 74.17 | 92.33 | 85.79 | 82.98 | 84.77 |
+

Table 3. Performance comparison (%) on the ScienceQA benchmark. #T-Param denotes trainable parameters. Categories: NAT (natural science), SOC (social science), LAN (language), TXT (text context), IMG (image context), NO (no context), G1-6 (grades 1-6), G7-12 (grades 7-12). Method groups: (1) human performance baseline, (2) zero/few-shot text-only LLMs, (3) zero/few-shot multimodal VLMs, (4) LLMs enhanced with knowledge graphs for multimodal reasoning.

We separately applied VaLiK enhancement to Qwen2-VL-72B-Instruct and LLaVA-34B, obtaining distinct improvements: LLaVA-34B achieves accuracy gains of $2.41\%$ (image-only KG) and $3.59\%$ (text-image KG), while Qwen2-VL-72B-Instruct shows $1.77\%$ and $2.23\%$ improvements respectively under identical configurations. These experimental findings collectively demonstrate that VaLiK effectively extracts valuable signals from the training corpus and enables dynamic knowledge injection into VLMs during inference, thereby substantially alleviating hallucination phenomena. The differential improvements between Qwen2-VL-72B-Instruct and LLaVA-34B further validate the framework's adaptability across model architectures.

Additionally, we analyze the results of LLMs without KG enhancement in the tables, which generally follow the scaling law [37]. However, DeepSeek-R1 shows anomalous behavior. Through testing, we find that its reasoning process may introduce complex information that interferes with its judgment. Furthermore, empirical results show that most baseline models achieve suboptimal performance without fine-tuning. In contrast, VaLiK's automated MMKG construction framework requires no task-specific adaptation yet delivers consistent improvements.

Multimodal Question Answering Tasks. We evaluated multimodal QA performance on the ScienceQA benchmark with Qwen2.5-7B and Qwen2.5-72B as base architectures, augmented by four knowledge sources: Mmkg, Visual Genome, text-only LightRAG, and VaLiK. Compared to existing zero-shot/few-shot LLMs that are not specifically optimized for scientific QA, our VaLiK-enhanced Qwen2.5-72B achieved SOTA performance on $62.5\%$ of subtasks, demonstrating particular strengths in multimodal reasoning scenarios requiring cross-modal alignment, with an average accuracy gain of $6.4\%$ over baseline models.

Our study identifies a fundamental imbalance between textual and visual knowledge representations in ScienceQA. Text-only KGs (14k entities, 18k relations) exhibit $8\times$ denser structured knowledge than image-only counterparts (3k concepts, 1k relations), explaining the visual modality's underperformance. Despite this gap, vision-KG-augmented Qwen2.5-7B still attains $4.16\%$ accuracy gains over its non-enhanced version. Notably, our MMKG requires only 489MB of storage in total, while the scene graph component of Visual Genome alone occupies 739MB. This lightweight construction enables effective reasoning using only textual KG descriptions, without raw images, in resource-constrained scenarios.
| Type | Method | #T-Param | NAT | SOC | LAN | TXT | IMG | NO | G1-6 | G7-12 | Average |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| Image-Only | Qwen2.5-7B | 7B | 76.20 | 67.83 | 77.27 | 74.49 | 65.79 | 79.02 | 77.72 | 69.35 | 74.72 |
| | + CVs | 7B | 80.06 (↑3.86) | 70.30 (↑2.47) | 80.55 (↑3.28) | 78.05 (↑3.56) | 68.43 (↑2.64) | 83.76 (↑4.74) | 81.17 (↑3.45) | 72.71 (↑3.36) | 78.14 (↑3.42) |
| | + SV | 7B | 79.14 (↓0.92) | 71.54 (↑1.24) | 79.27 (↓1.28) | 77.16 (↓0.89) | 69.72 (↑1.29) | 83.14 (↓0.62) | 80.65 (↓0.52) | 73.96 (↑1.25) | 78.88 (↑0.74) |
| Text-Image | Qwen2.5-7B | 7B | 76.20 | 67.83 | 77.27 | 74.49 | 65.79 | 79.02 | 77.72 | 69.35 | 74.72 |
| | + CVs | 7B | 81.88 (↑5.68) | 73.00 (↑5.17) | 84.00 (↑6.73) | 80.55 (↑6.06) | 70.05 (↑4.26) | 87.11 (↑8.09) | 82.01 (↑4.29) | 77.98 (↑8.63) | 80.57 (↑5.85) |
| | + SV | 7B | 84.15 (↑2.27) | 75.14 (↑2.14) | 87.64 (↑3.64) | 82.99 (↑2.44) | 73.18 (↑3.13) | 89.69 (↑2.58) | 84.40 (↑2.39) | 80.95 (↑2.97) | 83.16 (↑2.59) |
+

Table 4. Ablation study on the ScienceQA benchmark (CVs: CoE-based Vision-Language Models; SV: Similarity Verification). Performance metrics include: NAT (natural science), SOC (social science), LAN (language), TXT (text context), IMG (image context), NO (no context), G1-6 (grades 1-6), G7-12 (grades 7-12).
| Type | Method | Task 1 (%) | Task 2 (%) | Task 2-Merged (%) |
| --- | --- | --- | --- | --- |
| Image-Only | Qwen2.5-7B | 65.04 | 44.52 | 45.33 |
| | + CVs | 68.11 (↑3.07) | 47.00 (↑2.48) | 46.95 (↑1.62) |
| | + SV | 69.52 (↑1.41) | 49.54 (↑2.54) | 49.07 (↑2.12) |
| Text-Image | Qwen2.5-7B | 65.04 | 44.52 | 45.33 |
| | + CVs | 68.43 (↑3.39) | 48.61 (↑4.09) | 48.97 (↑3.64) |
| | + SV | 68.90 (↑0.47) | 50.02 (↑1.41) | 50.69 (↑1.72) |
+

Table 5. Ablation study on CrisisMMD with Qwen2.5-7B.

# 4.3. Ablation Study

Our ablation studies on CrisisMMD and ScienceQA demonstrate the specific roles of VaLiK's components. As shown in Table 4 and Table 5, the CVs (CoE-based VLMs) module improves accuracy across all settings, with average gains of $+3.05\%$ on CrisisMMD and $+4.63\%$ on ScienceQA tasks, validating that visual descriptions enhance reasoning. However, the SV (Similarity Verification) module exhibits dual effects: it significantly improves CrisisMMD metrics by pruning redundant textual descriptions, yet slightly degrades ScienceQA's image-only natural science reasoning. We hypothesize this discrepancy arises from dataset characteristics: CrisisMMD's generated captions contain substantially more redundant content, whereas ScienceQA's simpler visual scenes yield shorter descriptions. Pruning these shorter descriptions risks over-removal of critical semantics. Furthermore, different types of KGs influence the effectiveness of the components: CVs achieve greater gains in CrisisMMD's text-image fusion as the original text provides complementary context, while SV shows reduced effectiveness, likely due to occasional over-pruning of cross-modal linkages. Nevertheless, both modules collectively enhance performance across configurations, demonstrating their synergistic yet context-sensitive nature.

# 4.4. Further Analysis

Impact of VLM Quantity and Types. We evaluate the impact of varying quantities and types of VLMs on the CVs module. Our experiments reveal that Qwen2-VL generates the most visual descriptions, followed by LLaVA, while BLIP-2 produces the fewest. However, BLIP-2 demonstrates superior capability in extracting critical information and identifying key entity relationships within images. We therefore adopt BLIP-2 as the primary model, with LLaVA or Qwen2-VL serving as secondary/tertiary components. Adding more VLMs yields diminishing returns, owing to the limited number of entities in the current images, though we hypothesize their benefits would increase for complex visual scenes with richer semantic content. This phenomenon is empirically validated by our quantitative results in Figure 4.

![](images/05342ce1e79f74e662270b7c353d9bd93d8429578bb671e163509a6.jpg)
Figure 4. Impact analysis of VLM quantity on CrisisMMD.

![](images/3790fc9cc3c4a05154d2d0fc160ef6d6d27e8609eb80567e719a7f54aa2e6a58.jpg)

Computational Costs. Due to space limitations, we provide an overview of VaLiK's computational costs in Appendix F. Our method is significantly more cost-effective than manual annotation or LLM fine-tuning.

# 5. Conclusion

Multimodal reasoning in LLMs is constrained by incomplete knowledge and hallucination artifacts, limitations that persist because textual KGs cannot bridge visual-textual semantics due to their modality isolation. To bridge this gap, we propose VaLiK, a framework for constructing MMKGs through vision-language alignment, eliminating dependency on manual annotations while resolving visual-textual semantic inconsistencies. By integrating a cascade of pretrained VLMs and cross-modal verification, VaLiK converts images into structured knowledge while filtering noise. The resulting graphs enhance LLMs' reasoning with minimal storage overhead. Experiments on multimodal reasoning benchmarks show SOTA performance. VaLiK's modular design supports adaptability across domains, offering a scalable solution for autonomous knowledge synthesis.
This work advances multimodal AI systems by enabling efficient integration of visual and textual data. + +# 6. Acknowledgments + +The research was supported by Shanghai Artificial Intelligence Laboratory, the National Key R&D Program of China (Grant No. 2022ZD0160201) and the Science and Technology Commission of Shanghai Municipality (Grant No. 22DZ1100102). + +# References + +[1] Mahdi Abavisani, Liwei Wu, Shengli Hu, Joel Tetreault, and Alejandro Jaimes. Multimodal categorization of crisis events in social media. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 5 +[2] Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. Gpt-4 technical report. arXiv preprint arXiv:2303.08774, 2023. 1, 5 +[3] Firoj Alam, Ferda Ofli, and Muhammad Imran. Crisismmd: Multimodal twitter datasets from natural disasters. Proceedings of the International AAAI Conference on Web and Social Media, 12(1), 2018. 2, 5 +[4] Jean-Baptiste Alayrac, Jeff Donahue, Pauline Luc, Antoine Miech, Iain Barr, Yana Hasson, Karel Lenc, Arthur Mensch, Katherine Millican, Malcolm Reynolds, Roman Ring, Eliza Rutherford, Serkan Cabi, Tengda Han, Zhitao Gong, Sina Samangooei, Marianne Monteiro, Jacob L Menick, Sebastian Borgeaud, Andy Brock, Aida Nematzadeh, Sahand Sharifzadeh, Mikol aj Binkowski, Ricardo Barreira, Oriol Vinyals, Andrew Zisserman, and Karén Simonyan. Flamingo: a visual language model for few-shot learning. In Advances in Neural Information Processing Systems, pages 23716-23736. Curran Associates, Inc., 2022. 3, 4 +[5] Razvan Azamfirei, Sapna R Kudchadkar, and James Fackler. Large language models and the perils of their hallucinations. Critical Care, 27(1):120, 2023. 1 +[6] Jinheon Baek, Alham Fikri Aji, and Amir Saffari. Knowledge-augmented language model prompting for zero-shot knowledge graph question answering. In Proceedings of the Annual Meeting of the Association for Computational Linguistics (ACL), 2023. 2 +[7] Tadas Baltrusaitis, Chaitanya Ahuja, and Louis-Philippe Morency. Multimodal machine learning: A survey and taxonomy. IEEE Transactions on Pattern Analysis and Machine Intelligence, 41(2):423-443, 2019. 3 +[8] Dawei Chen, Zhixu Li, Binbin Gu, and Zhigang Chen. Multimodal named entity recognition with image attributes and image knowledge. In Database Systems for Advanced Applications: 26th International Conference, DASFAA 2021, Taipei, Taiwan, April 11–14, 2021, Proceedings, Part II 26, pages 186–201. Springer, 2021. 1 +[9] Jiawei Chen, Hongyu Lin, Xianpei Han, and Le Sun. Benchmarking large language models in retrieval-augmented generation. Proceedings of the AAAI Conference on Artificial Intelligence, 38(16):17754-17762, 2024. 1 + +[10] Ting Chen, Simon Kornblith, Kevin Swersky, Mohammad Norouzi, and Geoffrey E Hinton. Big self-supervised models are strong semi-supervised learners. Advances in neural information processing systems (NeurIPS), 33:22243-22255, 2020. 1 +[11] Xiang Chen, Ningyu Zhang, Lei Li, Shumin Deng, Chuanqi Tan, Changliang Xu, Fei Huang, Luo Si, and Huajun Chen. Hybrid transformer with multi-level fusion for multimodal knowledge graph completion. In Proceedings of the International Conference on Research and Development in Information Retrieva (SIGIR), pages 904-915, 2022. 
2 +[12] Xi Chen, Josip Djolonga, Piotr Padlewski, Basil Mustafa, Soravit Changpinyo, Jialin Wu, Carlos Riquelme Ruiz, Sebastian Goodman, Xiao Wang, Yi Tay, et al. Pali-x: On scaling up a multilingual vision and language model. arXiv preprint arXiv:2305.18565, 2023. 3 +[13] Xiang Chen, Jingtian Zhang, Xiaohan Wang, Ningyu Zhang, Tongtong Wu, Yuxiang Wang, Yongheng Wang, and Huajun Chen. Continual multimodal knowledge graph construction. In Proceedings of the Thirty-Third International Joint Conference on Artificial Intelligence, 2024. 3 +[14] Xiaokang Chen, Zhiyu Wu, Xingchao Liu, Zizheng Pan, Wen Liu, Zhenda Xie, Xingkai Yu, and Chong Ruan. Januspro: Unified multimodal understanding and generation with data and model scaling. arXiv preprint arXiv:2501.17811, 2025. 1 +[15] Yong Chen, Xinkai Ge, Shengli Yang, Linmei Hu, Jie Li, and Jinwen Zhang. A survey on multimodal knowledge graphs: Construction, completion and applications. Mathematics, 11 (8), 2023. 3 +[16] Zhuo Chen, Yichi Zhang, Yin Fang, Yuxia Geng, Lingbing Guo, Xiang Chen, Qian Li, Wen Zhang, Jiaoyan Chen, Yushan Zhu, et al. Knowledge graphs meet multimodal learning: A comprehensive survey. arXiv preprint arXiv:2402.05391, 2024. 2 +[17] Shiyao Cui, Jiangxia Cao, Xin Cong, Jiawei Sheng, Quanggang Li, Tingwen Liu, and Jinqiao Shi. Enhancing multimodal entity and relation extraction with variational information bottleneck. IEEE/ACM Transactions on Audio, Speech, and Language Processing, 32:1274-1285, 2024. 2 +[18] Xiaohui Cui, Xiaolong Qu, Dongmei Li, Yu Yang, Yuxun Li, and Xiaoping Zhang. Mkgcn: Multi-modal knowledge graph convolutional network for music recommender systems. *Electronics*, 12(12), 2023. 3 +[19] Yihe Deng, Chenchen Ye, Zijie Huang, Mingyu Derek Ma, Yiwen Kou, and Wei Wang. Graphvis: Boosting llms with visual knowledge graph integration. In Advances in Neural Information Processing Systems, pages 67511-67534. Curran Associates, Inc., 2024. 6, 7 +[20] Tausif Diwan, G. Anirudh, and Jitendra V. Tembhurne. Object detection using yolo: challenges, architectural successors, datasets and applications. Multimedia Tools Appl., 82 (6):9243-9275, 2022. 3 +[21] Alexey Dosovitskiy. An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929, 2020. 4 +[22] Darren Edge, Ha Trinh, Newman Cheng, Joshua Bradley, Alex Chao, Apurva Mody, Steven Truitt, Dasha Metropoli + +tansky, Robert Osazuwa Ness, and Jonathan Larson. From local to global: A graph rag approach to query-focused summarization. arXiv preprint arXiv:2404.16130, 2024. 5 +[23] Quan Fang, Xiaowei Zhang, Jun Hu, Xian Wu, and Changsheng Xu. Contrastive multi-modal knowledge graph representation learning. IEEE Transactions on Knowledge and Data Engineering, 35(9):8983-8996, 2023. 3 +[24] Duoduo Feng, Xiangteng He, and Yuxin Peng. Mkvse: Multimodal knowledge enhanced visual-semantic embedding for image-text retrieval. ACM Trans. Multimedia Comput. Commun. Appl., 19(5), 2023. 3 +[25] Deepanway Ghosal, Navonil Majumder, Roy Ka-Wei Lee, Rada Mihalcea, and Soujanya Poria. Language guided visual question answering: Elevate your multimodal language model using knowledge-enriched prompts. arXiv preprint arXiv:2310.20159, 2023. 6, 7 +[26] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025. 
1, 6 +[27] Yunfei Guo, Fei Yin, Xiao-hui Li, Xudong Yan, Tao Xue, Shuqi Mei, and Cheng-Lin Liu. Visual traffic knowledge graph generation from scene images. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 21604-21613, 2023. 3 +[28] ZIRUI GUO, Lianghao Xia, Yanhua Yu, Tu Ao, and Chao Huang. LightRAG: Simple and fast retrieval-augmented generation, 2024. 2, 5, 6 +[29] Lavdim Halilaj, Juergen Luettin, Sebastian Monka, Cory Henson, and Stefan Schmid. Knowledge graph-based integration of autonomous driving datasets. International Journal of Semantic Computing, 17(02):249-271, 2023. 3 +[30] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2016. 4 +[31] Yang Hu, Guihua Wen, Adriane Chapman, Pei Yang, Mingnan Luo, Yingxue Xu, Dan Dai, and Wendy Hall. Graph-based visual-semantic entanglement network for zero-shot image recognition. IEEE Transactions on Multimedia, 24: 2473-2487, 2022. 3 +[32] Zhiqiang Hu, Lei Wang, Yihuai Lan, Wanyu Xu, Ee-Peng Lim, Lidong Bing, Xing Xu, Soujanya Poria, and Roy Lee. LLM-adapters: An adapter family for parameter-efficient fine-tuning of large language models. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 5254-5276, Singapore, 2023. Association for Computational Linguistics. 2 +[33] Aaron Hurst, Adam Lerer, Adam P Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihinda, Alan Hayes, Alec Radford, et al. Gpt-4o system card. arXiv preprint arXiv:2410.21276, 2024. 1, 3, 6 +[34] Justin Johnson, Ranjay Krishna, Michael Stark, Li-Jia Li, David Shamma, Michael Bernstein, and Li Fei-Fei. Image retrieval using scene graphs. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2015. 2 + +[35] Adam Tauman Kalai and Santosh S. Vempala. Calibrated language models must hallucinate. In Proceedings of the 56th Annual ACM Symposium on Theory of Computing, page 160–171, New York, NY, USA, 2024. Association for Computing Machinery. 1 +[36] Amar Viswanathan Kannan, Dmitriy Fradkin, Ioannis Akrotirianakis, Tugba Kulahcioglu, Arquimedes Canedo, Aditi Roy, Shih-Yuan Yu, Malawade Arnav, and Mohammad Abdullah Al Faruque. Multimodal knowledge graph for deep learning papers and code. In Proceedings of the 29th ACM International Conference on Information & Knowledge Management, page 3417-3420, New York, NY, USA, 2020. Association for Computing Machinery. 3 +[37] Jared Kaplan, Sam McCandlish, Tom Henighan, Tom B Brown, Benjamin Chess, Rewon Child, Scott Gray, Alec Radford, Jeffrey Wu, and Dario Amodei. Scaling laws for neural language models. arXiv preprint arXiv:2001.08361, 2020. 7 +[38] Ranjay Krishna, Yuke Zhu, Oliver Groth, Justin Johnson, Kenji Hata, Joshua Kravitz, Stephanie Chen, Yannis Kalantidis, Li-Jia Li, David A Shamma, et al. Visual genome: Connecting language and vision using crowdsourced dense image annotations. International journal of computer vision, 123:32-73, 2017. 6, 7 +[39] Junlin Lee, Yequan Wang, Jing Li, and Min Zhang. Multimodal reasoning with multimodal knowledge graph. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 10767-10782, Bangkok, Thailand, 2024. Association for Computational Linguistics. 2, 3, 1 +[40] Junnan Li, Dongxu Li, Caiming Xiong, and Steven Hoi. 
BLIP: Bootstrapping language-image pre-training for unified vision-language understanding and generation. In Proceedings of the 39th International Conference on Machine Learning, pages 12888-12900. PMLR, 2022. 3 +[41] Junnan Li, Dongxu Li, Silvio Savarese, and Steven C. H. Hoi. BLIP-2: bootstrapping language-image pre-training with frozen image encoders and large language models. In Proceedings of the International Conference on Machine Learning (ICML), pages 19730–19742, 2023. 1, 3, 4, 6, 2 +[42] Xin Li, Dongze Lian, Zhihe Lu, Jiawang Bai, Zhibo Chen, and Xinchao Wang. Graphadapter: Tuning vision-language models with dual knowledge graph. In Advances in Neural Information Processing Systems, pages 13448-13466. Curran Associates, Inc., 2023. 3 +[43] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. In Advances in Neural Information Processing Systems, pages 34892-34916. Curran Associates, Inc., 2023. 3, 6, 7 +[44] Junming Liu, Yanting Gao, Siyuan Meng, Yifei Sun, Aoqi Wu, Yufei Jin, Yirong Chen, Ding Wang, and Guosun Zeng. Mosaic: Data-free knowledge distillation via mixture-of-experts for heterogeneous distributed environments. arXiv preprint arXiv:2505.19699, 2025. 1 +[45] Junming Liu, Guosun Zeng, Ding Wang, Yanting Gao, and Yufei Jin. Fedrecon: Missing modality reconstruction in distributed heterogeneous environments. arXiv preprint arXiv:2504.09941, 2025.3 + +[46] Ye Liu, Hui Li, Alberto Garcia-Duran, Mathias Niepert, Daniel Onoro-Rubio, and David S Rosenblum. Mmkg: multi-modal knowledge graphs. In The Semantic Web: 16th International Conference, ESWC 2019, Portoroz, Slovenia, June 2–6, 2019, Proceedings 16, pages 459–474. Springer, 2019. 2, 6, 7 +[47] Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin transformer: Hierarchical vision transformer using shifted windows. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 10012-10022, 2021. 4 +[48] Pan Lu, Swaroop Mishra, Tanglin Xia, Liang Qiu, Kai-Wei Chang, Song-Chun Zhu, Oyvind Tafjord, Peter Clark, and Ashwin Kalyan. Learn to explain: Multimodal reasoning via thought chains for science question answering. In Advances in Neural Information Processing Systems, pages 2507–2521. Curran Associates, Inc., 2022. 2, 5, 6, 7 +[49] Pan Lu, Baolin Peng, Hao Cheng, Michel Galley, Kai-Wei Chang, Ying Nian Wu, Song-Chun Zhu, and Jianfeng Gao. Chameleon: Plug-and-play compositional reasoning with large language models. In Advances in Neural Information Processing Systems, pages 43447-43478. Curran Associates, Inc., 2023. 6, 7 +[50] Gen Luo, Yiyi Zhou, Tianhe Ren, Shengxin Chen, Xiaoshuai Sun, and Rongrong Ji. Cheap and quick: Efficient vision-language instruction tuning for large language models. In Advances in Neural Information Processing Systems, pages 29615-29627. Curran Associates, Inc., 2023. 6 +[51] Kenneth Marino, Mohammad Rastegari, Ali Farhadi, and Roozbeh Mottaghi. Ok-vqa: A visual question answering benchmark requiring external knowledge. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 3 +[52] Runqing Miao, Qingxuan Jia, Fuchun Sun, Gang Chen, Haiming Huang, and Shengyi Miao. Semantic representation of robot manipulation with knowledge graph. Entropy, 25(4), 2023. 3 +[53] Chancharik Mitra, Brandon Huang, Trevor Darrell, and Roei Herzig. Compositional chain-of-thought prompting for large multimodal models. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 14420-14431, 2024. 6 +[54] Aske Plaat, Annie Wong, Suzan Verberne, Joost Broekens, Niki van Stein, and Thomas Back. Reasoning with large language models, a survey. arXiv preprint arXiv:2407.11511, 2024. 1 +[55] Bryan A. Plummer, Liwei Wang, Chris M. Cervantes, Juan C. Caicedo, Julia Hockenmaier, and Svetlana Lazebnik. Flickr30k entities: Collecting region-to-phrase correspondences for richer image-to-sentence models. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2015. 2, 1 +[56] Shengsheng Qian, Jun Hu, Quan Fang, and Changsheng Xu. Knowledge-aware multi-modal adaptive graph convolutional networks for fake news detection. ACM Trans. Multimedia Comput. Commun. Appl., 17(3), 2021. 3 +[57] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, + +Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision. In Proceedings of the 38th International Conference on Machine Learning, pages 8748-8763. PMLR, 2021. 3, 6 +[58] Brian Reily, Christopher Reardon, and Hao Zhang. Representing multi-robot structure through multimodal graph embedding for the selection of robot teams. In 2020 IEEE International Conference on Robotics and Automation (ICRA), pages 5576–5582, 2020. 3 +[59] Joshua Robinson, Christopher Michael Ryting, and David Wingate. Leveraging large language models for multiple choice question answering. In Proceedings of the International Conference on Learning Representations (ICLR), 2023. 1, 4 +[60] Hrituraj Singh, Anshul Nasery, Denil Mehta, Aishwarya Agarwal, Jatin Lamba, and Balaji Vasan Srinivasan. MI-MOQA: Multimodal input multimodal output question answering. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 5317-5332, Online, 2021. Association for Computational Linguistics. 3 +[61] Yaoxian Song, Penglei Sun, Haoyu Liu, Zhixu Li, Wei Song, Yanghua Xiao, and Xiaofang Zhou. Scene-driven multimodal knowledge graph construction for embodied ai. IEEE Transactions on Knowledge and Data Engineering, 36(11): 6962-6976, 2024. 2, 3 +[62] Rui Sun, Xuezhi Cao, Yan Zhao, Junchen Wan, Kun Zhou, Fuzheng Zhang, Zhongyuan Wang, and Kai Zheng. Multimodal knowledge graphs for recommender systems. In Proceedings of the 29th ACM International Conference on Information & Knowledge Management, page 1405-1414, New York, NY, USA, 2020. Association for Computing Machinery. 3 +[63] Yu Sun, Shuohuan Wang, Shikun Feng, Siyu Ding, Chao Pang, Junyuan Shang, Jiaxiang Liu, Xuyi Chen, Yanbin Zhao, Yuxiang Lu, et al. Ernie 3.0: Large-scale knowledge enhanced pre-training for language understanding and generation. arXiv preprint arXiv:2107.02137, 2021. 2 +[64] Gemini Team, Rohan Anil, Sebastian Borgeaud, Jean-Baptiste Alayrac, Jiahui Yu, Radu Soricut, Johan Schalkwyk, Andrew M Dai, Anja Hauth, Katie Millican, et al. Gemini: a family of highly capable multimodal models. arXiv preprint arXiv:2312.11805, 2023. 3 +[65] Shengbang Tong, Ellis L Brown II, Penghao Wu, Sanghyun Woo, ADITHYA JAIRAM IYER, Sai Charitha Akula, Shusheng Yang, Jihan Yang, Manoj Middepogu, Ziteng Wang, Xichen Pan, Rob Fergus, Yann LeCun, and Saining Xie. Cambrian-1: A fully open, vision-centric exploration of multimodal LLMs. 
In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024. 1 +[66] Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, et al. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288, 2023. 1, 5 +[67] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia + +Polosukhin. Attention is all you need. In Advances in Neural Information Processing Systems. Curran Associates, Inc., 2017. 4 +[68] Peng Wang, Qi Wu, Chunhua Shen, Anthony Dick, and Anton van den Hengel. Fvqa: Fact-based visual question answering. IEEE Transactions on Pattern Analysis and Machine Intelligence, 40(10):2413-2427, 2018. 3 +[69] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, et al. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191, 2024. 3, 6 +[70] Xin Wang, Benyuan Meng, Hong Chen, Yuan Meng, Ke Lv, and Wenwu Zhu. Tiva-kg: A multimodal knowledge graph with text, image, video and audio. In Proceedings of the 31st ACM International Conference on Multimedia, page 2391-2399, New York, NY, USA, 2023. Association for Computing Machinery. 3 +[71] Yuequn Wang, Liyan Dong, Hao Zhang, Xintao Ma, Yongli Li, and Minghui Sun. An enhanced multi-modal recommendation based on alternate training with knowledge graph representation. IEEE Access, 8:213012-213026, 2020. 3 +[72] Tao Wu, Mengze Li, Jingyuan Chen, Wei Ji, Wang Lin, Jinyang Gao, Kun Kuang, Zhou Zhao, and Fei Wu. Semantic alignment for multimodal large language models. In Proceedings of the 32nd ACM International Conference on Multimedia, page 3489-3498, New York, NY, USA, 2024. Association for Computing Machinery. 1 +[73] Yike Wu, Nan Hu, Guilin Qi, Sheng Bi, Jie Ren, Anhuan Xie, and Wei Song. Retrieve-rewrite-answer: A kg-to-text enhanced llms framework for knowledge graph question answering. arXiv preprint arXiv:2309.11206, 2023. 2 +[74] Ziyang Xiao, Dongxiang Zhang, Yangjun Wu, Lilin Xu, Yuan Jessica Wang, Xiongwei Han, Xiaojin Fu, Tao Zhong, Jia Zeng, Mingli Song, and Gang Chen. Chain-of-experts: When LLMs meet complex operations research problems. In The Twelfth International Conference on Learning Representations, 2024. 2, 3 +[75] Dexuan Xu, Yanyuan Chen, Jieyi Wang, Yue Huang, Hanpin Wang, Zhi Jin, Hongxing Wang, Weihua Yue, Jing He, Hang Li, and Yu Huang. MLeVLM: Improve multi-level progressive capabilities based on multimodal large language model for medical visual question answering. In Findings of the Association for Computational Linguistics: ACL 2024, pages 4977-4997, Bangkok, Thailand, 2024. Association for Computational Linguistics. 1 +[76] Peng Xu, Xiatian Zhu, and David A. Clifton. Multimodal learning with transformers: A survey. IEEE Transactions on Pattern Analysis and Machine Intelligence, 45(10):12113-12132, 2023. 3 +[77] An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, et al. Qwen2. 5 technical report. arXiv preprint arXiv:2412.15115, 2024. 6 +[78] Xiaocui Yang, Wenfang Wu, Shi Feng, Ming Wang, Daling Wang, Yang Li, Qi Sun, Yifei Zhang, Xiaoming Fu, and Soujanya Poria. Mm-bigbench: Evaluating multimodal models + +on multimodal content comprehension tasks. arXiv preprint arXiv:2310.09036, 2023. 
7 +[79] Qinghao Ye, Haiyang Xu, Guohai Xu, Jiabo Ye, Ming Yan, Yiyang Zhou, Junyang Wang, Anwen Hu, Pengcheng Shi, Yaya Shi, et al. mplug-owl: Modularization empowers large language models with multimodality. arXiv preprint arXiv:2304.14178, 2023. 3 +[80] Shukang Yin, Chaoyou Fu, Sirui Zhao, Ke Li, Xing Sun, Tong Xu, and Enhong Chen. A survey on multimodal large language models. arXiv preprint arXiv:2306.13549, 2023. 1 +[81] Jingtong Yue, Zhiwei Lin, Xin Lin, Xiaoyu Zhou, Xiangtai Li, Lu Qi, Yongtao Wang, and Ming-Hsuan Yang. RobuR-CDet: Enhancing robustness of radar-camera fusion in bird's eye view for 3d object detection. In The Thirteenth International Conference on Learning Representations, 2025. 3 +[82] Yichi Zhang, Zhuo Chen, Lingbing Guo, Yajing Xu, Binbin Hu, Ziqi Liu, Huajun Chen, and Wen Zhang. Mygo: Discrete modality information as fine-grained tokens for multi-modal knowledge graph completion. CoRR, abs/2404.09468, 2024. 2 +[83] Zhuosheng Zhang, Aston Zhang, Mu Li, hai zhao, George Karypis, and Alex Smola. Multimodal chain-of-thought reasoning in language models. Transactions on Machine Learning Research, 2024. 3 +[84] Jiabao Zhao, Xin Lin, Jie Zhou, Jing Yang, Liang He, and Zhaohui Yang. Knowledge-based fine-grained classification for few-shot learning. In 2020 IEEE International Conference on Multimedia and Expo (ICME), pages 1-6, 2020. 3 +[85] Changmeng Zheng, Junhao Feng, Ze Fu, Yi Cai, Qing Li, and Tao Wang. Multimodal relation extraction with efficient graph alignment. In Proceedings of the 29th ACM International Conference on Multimedia, page 5298-5306, New York, NY, USA, 2021. Association for Computing Machinery. 2 +[86] Ge Zheng, Bin Yang, Jiajin Tang, Hong-Yu Zhou, and Sibei Yang. Ddcot: Duty-distinct chain-of-thought prompting for multimodal reasoning in language models. In Advances in Neural Information Processing Systems, pages 5168-5191. Curran Associates, Inc., 2023. 3, 6, 7 +[87] Juncheng Zheng, Meiyu Liang, Yang Yu, Yawen Li, and Zhe Xue. Knowledge graph enhanced multimodal transformer for image-text retrieval. In 2024 IEEE 40th International Conference on Data Engineering (ICDE), pages 70-82, 2024. 3 +[88] Deyao Zhu, Jun Chen, Kilichbek Haydarov, Xiaogian Shen, Wenxuan Zhang, and Mohamed Elhoseiny. Chatgpt asks, blip-2 answers: Automatic questioning towards enriched visual descriptions. arXiv preprint arXiv:2303.06594, 2023. 4 +[89] Deyao Zhu, Jun Chen, Xiaogian Shen, Xiang Li, and Mohamed Elhoseiny. MiniGPT-4: Enhancing vision-language understanding with advanced large language models. In The Twelfth International Conference on Learning Representations, 2024. 3 +[90] Xiangru Zhu, Zhixu Li, Xiaodan Wang, Xueyao Jiang, Penglei Sun, Xuwu Wang, Yanghua Xiao, and Nicholas Jing Yuan. Multi-modal knowledge graph construction and ap + +plication: A survey. IEEE Transactions on Knowledge and Data Engineering, 36(2):715-735, 2024. 2 +[91] Zhengxia Zou, Keyan Chen, Zhenwei Shi, Yuhong Guo, and Jieping Ye. Object detection in 20 years: A survey. Proceedings of the IEEE, 111(3):257-276, 2023. 3 + +# Aligning Vision to Language: Annotation-Free Multimodal Knowledge Graph Construction for Enhanced LLMs Reasoning + +Supplementary Material + +![](images/a3b9193854505da3f69bfbf1b89c3b890690e66f382f92dbb2246b00c8c73cc4.jpg) +Figure 5. (a) The limited information contained in text-based KGs leads to inaccurate responses. (b) Leveraging MMKGs enables reasoning with enriched multimodal information to produce the correct answer. + +# A. 
Cross-Modal Reasoning Failures in Textual KGs + +Multimodal learning, by virtue of its capability to synergistically integrate heterogeneous data modalities, establishes a comprehensive knowledge acquisition paradigm that significantly enhances reasoning robustness [39]. This principle extends to Multimodal Knowledge Graphs (MMKGs), where the semantic symbiosis between visual and textual modalities addresses the critical limitation of modal isolation inherent in conventional text-based KGs. As empirically demonstrated in Figure 5, pure textual KGs often induce hallucinated or incomplete responses due to their inability to resolve visual-textual semantic ambiguities. For instance, when queried about fine-grained visual attributes (e.g., spatial relationships or object properties absent in textual metadata), LLMs grounded solely on textual KG triples frequently generate plausible but factually inconsistent answers, as they lack access to cross-modal referential grounding. In contrast, MMKGs bridge this gap through bidirectional visual-textual entity linking, enabling LLMs to retrieve and reason over fused evidence from both modalities. Our qualitative analysis of the case in Figure 5 reveals that the multimodal reasoning path—leveraging both image-derived entities and textual relationships—is essential for deriving logically coherent and factually accurate conclusions. + +![](images/4482265fe190048fcacd251960a888f5a863aba009211a7e298bd63dc9539739.jpg) +Figure 6. Three example social media posts with labelled named entities [8]. + +
| Type | #Chains | Mentions/Chain | Boxes/Chain |
| --- | --- | --- | --- |
| people | 59766 | 3.17 | 1.95 |
| clothing | 42380 | 1.76 | 1.44 |
| body parts | 12809 | 1.50 | 1.42 |
| animals | 5086 | 3.63 | 1.44 |
| vehicles | 5561 | 2.77 | 1.21 |
| instruments | 1827 | 2.85 | 1.61 |
| scene | 46919 | 2.03 | 0.62 |
| other | 82098 | 1.94 | 1.04 |
| total | 244035 | 2.10 | 1.13 |
Table 6. Coreference chain statistics of Flickr30K Entities. The number of mentions per chain indicates how salient an entity is; the number of boxes per chain indicates how many distinct entities it refers to.

# B. Case Studies on Manual Annotation Overheads

The development of robust entity extraction models typically hinges on large-scale annotated corpora, yet the generalizability of these models remains intrinsically bounded by the semantic scope and granularity of their training datasets. Widely adopted benchmarks such as Flickr30K Entities [55] exemplify this constraint: while serving as de facto standards for evaluating visual-linguistic entity grounding, their construction requires labor-intensive manual annotation at scale. As illustrated in Figure 6, even high-quality annotations in such datasets often adopt a minimalist tagging paradigm, identifying only coarse-grained entities while neglecting fine-grained attributes and contextual relationships. This sparsity of semantic enrichment propagates directly to trained models, which consequently fail to capture the compositional semantics necessary for complex reasoning scenarios.

![](images/b3d612618801180dba35a284f64b34eea05de762b170c942e03ec9dd5a4b8bdd.jpg)
Figure 7. An example from the ScienceQA benchmark [48], illustrating multimodal question-answering scenarios that necessitate joint reasoning over textual prompts and visual evidence.

# C. Case Studies on Visual Specificity Deficits in VLM-Generated Captions

As exemplified in Figure 7, vision-language models like BLIP-2 [41] tend to produce oversimplified textual descriptions that lack actionable visual-semantic signals. The VLM-generated caption ("A map of the united states with the location of the united states") merely identifies coarse-grained scene semantics, failing to capture object-level attributes (color coding of regions), spatial relationships (border adjacency between Arizona and Mexico), and compositional context (compass orientation in the lower-right corner). In contrast, human annotations ("This is a map of the United States. The main part of the country is shown in green, with several states labeled. Arizona is in the southwestern part of the US, bordering Mexico. Oklahoma is in the central-southern region. Louisiana is located along the Gulf of Mexico in the southeastern part. West Virginia is in the eastern part of the country. There's also a compass in the bottom-right corner to show directions.") exhibit the level of visual specificity that multimodal reasoning requires.

# D. Retrieval Strategy in MMKG Construction

We adopt retrieval strategies based on the framework provided by LightRAG [28], which supports multiple modes:

- local: focuses on context-dependent information;
- global: utilizes global knowledge;
- hybrid: combines local and global retrieval methods;
- naive: performs basic search without advanced techniques;
- mix: integrates knowledge graph and vector retrieval.

In our implementation, we rely on the hybrid retrieval mode, which balances the precision of local cues with the breadth of global knowledge. This strategy improves the relevance and completeness of retrieved information, which is crucial for high-quality MMKG construction. A minimal usage sketch is given below.
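The following is a minimal sketch of this setup, assuming the open-source LightRAG Python package; the working directory, model binding, and query text are illustrative, and the exact constructor arguments vary across LightRAG versions.

```python
# Minimal sketch of hybrid-mode retrieval with LightRAG (assumes the
# open-source HKUDS implementation; paths, model binding, and the query
# are illustrative, and constructor details differ between versions).
from lightrag import LightRAG, QueryParam

rag = LightRAG(
    working_dir="./mmkg_store",  # hypothetical directory for the graph store
    # llm_model_func=...         # bind the LLM used for extraction/retrieval,
    #                            # e.g. a Qwen2.5-7B completion function
)

# Index refined image descriptions: LightRAG chunks the text, extracts
# entities and relations, and persists them as a knowledge graph.
rag.insert("A map of the United States; Arizona borders Mexico. ...")

# "hybrid" combines local (context-dependent) and global knowledge retrieval.
print(rag.query(
    "Which state shown on the map borders Mexico?",
    param=QueryParam(mode="hybrid"),
))
```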
# Algorithm 1 MMKG Generation

Require: $\hat{S}$ (refined description), $T$ (external knowledge, optional)

Ensure: $\mathcal{G} = (\mathcal{E},\mathcal{R})$ (knowledge graph)

1: $\mathcal{T}\gets \hat{S}\oplus T$ $\triangleright$ Concatenate $\hat{S}$ and $T$
2: $\mathcal{G}\gets$ LightRAG($\mathcal{T}$) $\triangleright$ Generate graph via LightRAG
3: $(\mathcal{E},\mathcal{R})\gets f_{\mathrm{ERE}}(\mathcal{T})$ $\triangleright$ Extract entities and relations
4: return $\mathcal{G} = \{(h,r,t)\mid h,t\in \mathcal{E},r\in \mathcal{R}\}$

LightRAG effectively supports automatic MMKG construction, and its retrieval design plays a central role in our framework. Specifically, LightRAG introduces keyword-guided text chunking to expand the retrievable context. By leveraging both high-level and low-level keywords in combination with chunk-level vector retrieval, it enables more comprehensive knowledge access. The choice of retrieval model also matters: larger LLMs retrieve more slowly but perform better. In this experiment we used Qwen2.5-7B for retrieval; the 32B and 72B variants improved performance by $1\% - 5\%$ but significantly increased graph construction time, so we ultimately adopted the lightweight model. The complete LightRAG-based generation procedure is shown in Algorithm 1.

# E. Selection of Sensitivity Threshold $\tau$

We select the sensitivity threshold $\tau$ empirically based on performance on the validation set. In practice, $\tau$ can be approximately determined by observing the token length distribution of captions: datasets with richer visual content and longer captions tend to benefit from a lower $\tau$, while simpler datasets can tolerate a higher $\tau$. This provides a practical way to adjust $\tau$ without extensive tuning.

In addition, we notice a key pattern when analyzing the relevance scores across windows. Around certain values of $\tau$, the scores tend to cluster tightly on both sides of the threshold. As a result, even a small change in $\tau$ near these points can lead to a large change in the number of tokens being pruned. This indicates that the pruning process is especially sensitive around those points, and adjusting $\tau$ even slightly can have a large impact on the final token budget.

# F. Construction Cost and Scalability

Construction cost is a multifaceted issue, which we analyze from the perspectives of time and hardware requirements. Time-wise, the main components are CoE and LightRAG. While using APIs can significantly speed up the process, offline deployment and inference are also feasible. For example, generating descriptions with Qwen2-VL-7B achieves around 60 tokens per second, processing roughly one image every 4 seconds; processing 1k images therefore takes approximately 1.21 hours. Constructing a KG with Qwen2.5-7B yields about 196k tokens per hour, requiring about 1.33 hours for 1k images. The intermediate pruning step, accelerated by CLIP's fast processing speed, is negligible. Overall, the cost is much lower than manual annotation or fine-tuning LLMs, making the method applicable to large-scale datasets. For resource-constrained users, deploying a lightweight VLM with CoE is comparable to or even more efficient than deploying a powerful VLM, further demonstrating the scalability of our approach. The back-of-envelope model below makes these estimates explicit.
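As a sanity check on the figures above, this sketch reproduces the cost arithmetic; the per-image seconds and KG token count are back-derived from the reported totals (the text rounds the former to "one image every 4 seconds") and are assumptions, not measured values.

```python
# Back-of-envelope cost model for the Appendix F estimates. Throughputs
# come from the text; sec_per_image_vlm (~4.36 s) and tokens_per_image_kg
# (~260) are back-derived from the reported 1.21 h / 1.33 h per 1k images.
def pipeline_hours(n_images: int,
                   sec_per_image_vlm: float = 4.36,      # ~60 tok/s captioning
                   tokens_per_image_kg: int = 260,
                   kg_tokens_per_hour: int = 196_000) -> tuple[float, float]:
    caption_h = n_images * sec_per_image_vlm / 3600              # CoE captioning
    kg_h = n_images * tokens_per_image_kg / kg_tokens_per_hour   # LightRAG step
    return caption_h, kg_h

cap, kg = pipeline_hours(1_000)
print(f"captioning ~{cap:.2f} h, KG construction ~{kg:.2f} h")
# -> captioning ~1.21 h, KG construction ~1.33 h, matching the reported totals
```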
# G. Discussion on VLM Usage and Design Flexibility

Our observations on the number and type of VLMs used in CoE are consistent with the conclusions drawn in the original CoE paper [74]. Regardless of the specific VLM architecture, increasing the number of models $N$ consistently improves performance up to a saturation point, after which further scaling yields diminishing returns. Moreover, we find that convergence is reached more quickly with lower softmax temperatures or simpler datasets; both reduce the ambiguity in model disagreement, allowing consensus to form more rapidly among the ensemble.

Interestingly, our results also show that a single strong VLM can match the performance of a cascade of smaller, lightweight models. This suggests a practical trade-off between model strength and ensemble size: while ensembling helps diverse weak learners reach consensus, a single high-capacity model may suffice in many scenarios, especially when computational resources are limited.

In the original CoE method, the outputs from all VLM experts are first aggregated, and a selection process then determines which expert descriptions to use. To save time in constructing the MMKGs with LLMs, we instead adopted a sequential strategy in which the output of one expert serves as the prompt input for the next. We also evaluated the original aggregation-and-selection strategy on a smaller-scale dataset and found it to perform well, sometimes even surpassing the sequential approach. This confirms that CoE's original design of aggregating all experts' outputs before selecting descriptions is effective and remains a strong baseline; however, constructing MMKGs with LLMs from these aggregated descriptions requires significantly more time.

Additionally, while we apply pruning only at the final description step, pruning during intermediate steps may also yield good results depending on the dataset and task. There is no fixed rule for when or how to apply pruning, and our framework is designed to be flexible enough to accommodate different strategies. We emphasize that both our CoE framework and the SV step are intended to be adaptable, allowing users to experiment freely and select the approach that best suits their needs.

Various VLMs can be used for pruning. Among them, we recommend CLIP for its fast inference speed and pruning performance comparable to other VLMs; given this efficiency and effectiveness, CLIP is a practical choice in many scenarios. A minimal sketch of such a CLIP-based pruning step follows.
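The sketch below illustrates one way to realize CLIP-based similarity verification over description windows; it uses the Hugging Face transformers CLIP API, and the sentence-level windowing and default $\tau$ are illustrative assumptions rather than the exact SV implementation.

```python
# Illustrative CLIP-based similarity-verification (SV) pruning via the
# Hugging Face `transformers` CLIP API. Sentence-level windows and the
# default tau are assumptions for illustration, not the exact SV design.
import torch
from PIL import Image
from transformers import CLIPModel, CLIPProcessor

model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32").eval()
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

def prune_description(image: Image.Image, windows: list[str],
                      tau: float = 0.25) -> list[str]:
    """Keep only description windows whose image-text cosine similarity >= tau."""
    inputs = processor(text=windows, images=image,
                       return_tensors="pt", padding=True, truncation=True)
    with torch.no_grad():
        out = model(**inputs)
    # Projected embeddings; re-normalizing is a harmless no-op if already unit.
    img = out.image_embeds / out.image_embeds.norm(dim=-1, keepdim=True)
    txt = out.text_embeds / out.text_embeds.norm(dim=-1, keepdim=True)
    sims = (txt @ img.T).squeeze(-1)  # one cosine score per window
    return [w for w, s in zip(windows, sims.tolist()) if s >= tau]

# Note: windows whose scores cluster near tau dominate the pruning budget,
# which is why small shifts in tau can change the kept-token count sharply
# (see Appendix E).
```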
\ No newline at end of file diff --git a/data/2025/2503_12xxx/2503.12972/images/05342ce1e79f74e662270b7c353d9bd93d8429578bb671ee1bbfba5e163509a6.jpg b/data/2025/2503_12xxx/2503.12972/images/05342ce1e79f74e662270b7c353d9bd93d8429578bb671ee1bbfba5e163509a6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8c3169d1a5cd7491d63137ff83f5edf351624cce --- /dev/null +++ b/data/2025/2503_12xxx/2503.12972/images/05342ce1e79f74e662270b7c353d9bd93d8429578bb671ee1bbfba5e163509a6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ee26cc5522485999b23f16bf15ec75c473d76b219b1c98d3a8cb0c3b5dca6604 +size 11139 diff --git a/data/2025/2503_12xxx/2503.12972/images/0565a1818d28d5c065a7ad32bab646f9d5e0994ab7c5dbfd84d22b45087b7151.jpg b/data/2025/2503_12xxx/2503.12972/images/0565a1818d28d5c065a7ad32bab646f9d5e0994ab7c5dbfd84d22b45087b7151.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e73bc9036230c5b2437b996c86d569bd4b215272 --- /dev/null +++ b/data/2025/2503_12xxx/2503.12972/images/0565a1818d28d5c065a7ad32bab646f9d5e0994ab7c5dbfd84d22b45087b7151.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:92bd8fd1efa774509efb6b4f787cd3e267cbe200d0e8170f30053a6d855af95b +size 55935 diff --git a/data/2025/2503_12xxx/2503.12972/images/09ce5c827fe27b873450b6d2c1fba26985bd7235d4c6e26d0aa70b1fdb91b127.jpg b/data/2025/2503_12xxx/2503.12972/images/09ce5c827fe27b873450b6d2c1fba26985bd7235d4c6e26d0aa70b1fdb91b127.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7d5b01c8042c3c9461bf05e29c31db9a0497703a --- /dev/null +++ b/data/2025/2503_12xxx/2503.12972/images/09ce5c827fe27b873450b6d2c1fba26985bd7235d4c6e26d0aa70b1fdb91b127.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a2fb511b7d082137b89ec270aeaacf4b75842bc965e5cf29fe1929d53c65eb61 +size 10594 diff --git a/data/2025/2503_12xxx/2503.12972/images/1f7f23d221607565287eca865410c8391f1d00a48c77b2056efcc5e12a9feee2.jpg b/data/2025/2503_12xxx/2503.12972/images/1f7f23d221607565287eca865410c8391f1d00a48c77b2056efcc5e12a9feee2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d457378b92a840ec9c2263d98138d4017be9b3f2 --- /dev/null +++ b/data/2025/2503_12xxx/2503.12972/images/1f7f23d221607565287eca865410c8391f1d00a48c77b2056efcc5e12a9feee2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2f727452dfa545835525ffdbea5c2f1145965a77653cae436dc3c561efee27e1 +size 4282 diff --git a/data/2025/2503_12xxx/2503.12972/images/3062818bfab71156811bf5d45f1a407fa2df4f06a513f4ad9ca2606598e0e311.jpg b/data/2025/2503_12xxx/2503.12972/images/3062818bfab71156811bf5d45f1a407fa2df4f06a513f4ad9ca2606598e0e311.jpg new file mode 100644 index 0000000000000000000000000000000000000000..41ef508d1b0e8c11bea523ec9d8c32d2c6807925 --- /dev/null +++ b/data/2025/2503_12xxx/2503.12972/images/3062818bfab71156811bf5d45f1a407fa2df4f06a513f4ad9ca2606598e0e311.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bf83f1809dfac6bcd7f886c283b7dd5452a56fdebab5fad38b137c08815477cb +size 4094 diff --git a/data/2025/2503_12xxx/2503.12972/images/3790fc9cc3c4a05154d2d0fc160ef6d6d27e8609eb80567e719a7f54aa2e6a58.jpg b/data/2025/2503_12xxx/2503.12972/images/3790fc9cc3c4a05154d2d0fc160ef6d6d27e8609eb80567e719a7f54aa2e6a58.jpg new file mode 100644 index 0000000000000000000000000000000000000000..304bc0abf6fe7aef0ee0fae32a513efcb13217f4 --- /dev/null +++ 
b/data/2025/2503_12xxx/2503.12972/images/3790fc9cc3c4a05154d2d0fc160ef6d6d27e8609eb80567e719a7f54aa2e6a58.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:43d6e587ccef2c38031b5d859bb51f4f020090c94de2cea830f3a7b35900fbdd +size 11681 diff --git a/data/2025/2503_12xxx/2503.12972/images/3ae9a45583be9946e86dc7de188a71f381a9113b43215237e1f028a2f67cfac2.jpg b/data/2025/2503_12xxx/2503.12972/images/3ae9a45583be9946e86dc7de188a71f381a9113b43215237e1f028a2f67cfac2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..beab4e362d3f5b0675dc73b3acbc0b7d2c4bfa7a --- /dev/null +++ b/data/2025/2503_12xxx/2503.12972/images/3ae9a45583be9946e86dc7de188a71f381a9113b43215237e1f028a2f67cfac2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0d5e83c49cf24eeffb1296afb8cbfdde4a61571a178d84039ebf3c2fdf658025 +size 118284 diff --git a/data/2025/2503_12xxx/2503.12972/images/41fbae9956bdbe831d0208c015c2756237ce428c21b43c97f3a94b8e57eae94b.jpg b/data/2025/2503_12xxx/2503.12972/images/41fbae9956bdbe831d0208c015c2756237ce428c21b43c97f3a94b8e57eae94b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f9659fdf352ac1f181bf821554dc9a65050404b4 --- /dev/null +++ b/data/2025/2503_12xxx/2503.12972/images/41fbae9956bdbe831d0208c015c2756237ce428c21b43c97f3a94b8e57eae94b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:63d161d34b1fbd09a5bcd5f7e6a604c20370ede8696055affc96f382dc6baf95 +size 3999 diff --git a/data/2025/2503_12xxx/2503.12972/images/4482265fe190048fcacd251960a888f5a863aba009211a7e298bd63dc9539739.jpg b/data/2025/2503_12xxx/2503.12972/images/4482265fe190048fcacd251960a888f5a863aba009211a7e298bd63dc9539739.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d270318da7bb743e21821f5483b246af3f71a413 --- /dev/null +++ b/data/2025/2503_12xxx/2503.12972/images/4482265fe190048fcacd251960a888f5a863aba009211a7e298bd63dc9539739.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:886f831f3c61ed2fde62677ce95f039d9a6f71e18c12d01ccac6b60e7a0b338b +size 42449 diff --git a/data/2025/2503_12xxx/2503.12972/images/484545c159fe844bb510c12ebc2cd4c6f052d098b4a136c90ac20cca5181cd7d.jpg b/data/2025/2503_12xxx/2503.12972/images/484545c159fe844bb510c12ebc2cd4c6f052d098b4a136c90ac20cca5181cd7d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..762107540495f771eb273bca27badbf47634bca5 --- /dev/null +++ b/data/2025/2503_12xxx/2503.12972/images/484545c159fe844bb510c12ebc2cd4c6f052d098b4a136c90ac20cca5181cd7d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8731a4a9e52aaefd881b2d99ba702ac3bfd5376b58156054a873105e9e31e8fd +size 4570 diff --git a/data/2025/2503_12xxx/2503.12972/images/4afcd56b1fa4990fa61cf4b7cfa0d337feceab24bf56a82c3aa78cb2285b2360.jpg b/data/2025/2503_12xxx/2503.12972/images/4afcd56b1fa4990fa61cf4b7cfa0d337feceab24bf56a82c3aa78cb2285b2360.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2b61b831d364df347e547effedba5ed0d8b9b316 --- /dev/null +++ b/data/2025/2503_12xxx/2503.12972/images/4afcd56b1fa4990fa61cf4b7cfa0d337feceab24bf56a82c3aa78cb2285b2360.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:24f318eef3799463773c6597616bf19ae6c04e90551f15b044b370b9083e36ed +size 50609 diff --git a/data/2025/2503_12xxx/2503.12972/images/4c07c1b06d4321725777668e77059d3bd720c3641b208a370e2a8d113fdb9bbe.jpg 
b/data/2025/2503_12xxx/2503.12972/images/4c07c1b06d4321725777668e77059d3bd720c3641b208a370e2a8d113fdb9bbe.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d3fc8c4e39777d8babb5ea50f4a0095e2fecbfc7 --- /dev/null +++ b/data/2025/2503_12xxx/2503.12972/images/4c07c1b06d4321725777668e77059d3bd720c3641b208a370e2a8d113fdb9bbe.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8d6215ba6cd176643f07178334c092e520cdf8bb2ee9b0dd373714951678266b +size 68445 diff --git a/data/2025/2503_12xxx/2503.12972/images/4ed5640ca4dff132eb932d23112990edf9118e03775355bded4484c067665b93.jpg b/data/2025/2503_12xxx/2503.12972/images/4ed5640ca4dff132eb932d23112990edf9118e03775355bded4484c067665b93.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2207cba8f1d4b9e370b22cfd778ae65395cb54e3 --- /dev/null +++ b/data/2025/2503_12xxx/2503.12972/images/4ed5640ca4dff132eb932d23112990edf9118e03775355bded4484c067665b93.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:da23215f8c9068a838bc7da100bce587ce0b8797d2e3ea87ca3bba8508367689 +size 187028 diff --git a/data/2025/2503_12xxx/2503.12972/images/4f0401946432d33d9c1cf582ceba501170762e9bc5154f160d35a6b7809d9e45.jpg b/data/2025/2503_12xxx/2503.12972/images/4f0401946432d33d9c1cf582ceba501170762e9bc5154f160d35a6b7809d9e45.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dd09dd3d5f8607994df7033e98ec3001460e1122 --- /dev/null +++ b/data/2025/2503_12xxx/2503.12972/images/4f0401946432d33d9c1cf582ceba501170762e9bc5154f160d35a6b7809d9e45.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e102241d38dfa10da4a144c5cfc41e0ae7641ca3c240914bcf86cb1e48999fab +size 119909 diff --git a/data/2025/2503_12xxx/2503.12972/images/53202978704d71375752ea29410832e57b68988c7b72a4f9edd34de6f0500125.jpg b/data/2025/2503_12xxx/2503.12972/images/53202978704d71375752ea29410832e57b68988c7b72a4f9edd34de6f0500125.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f304f38fd2c26ecf41286b11d882698865bdf8b1 --- /dev/null +++ b/data/2025/2503_12xxx/2503.12972/images/53202978704d71375752ea29410832e57b68988c7b72a4f9edd34de6f0500125.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:81d1b03e7fc4e219358b08bfae71429d6e846bc357f0c0f0303ff43146f01a74 +size 3681 diff --git a/data/2025/2503_12xxx/2503.12972/images/53b3c1ef64a002a98e518980197c31d8968d68df2609a7612ca99b1b774dfcdb.jpg b/data/2025/2503_12xxx/2503.12972/images/53b3c1ef64a002a98e518980197c31d8968d68df2609a7612ca99b1b774dfcdb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4345f48adfb4a8176408adc5371cb01ef07b5aa1 --- /dev/null +++ b/data/2025/2503_12xxx/2503.12972/images/53b3c1ef64a002a98e518980197c31d8968d68df2609a7612ca99b1b774dfcdb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:117c24017b06ee5a5539b248b6fc5c0ef17add51ecd71e1a285eba37c25510c8 +size 3250 diff --git a/data/2025/2503_12xxx/2503.12972/images/65280c81158ceeaf1600375296559fa3912dc5fb2ff19e0b0677dd1e25dc78a1.jpg b/data/2025/2503_12xxx/2503.12972/images/65280c81158ceeaf1600375296559fa3912dc5fb2ff19e0b0677dd1e25dc78a1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0ae9c063b68b29e56f0d9a0c9f3bfe1a53aa1fb3 --- /dev/null +++ b/data/2025/2503_12xxx/2503.12972/images/65280c81158ceeaf1600375296559fa3912dc5fb2ff19e0b0677dd1e25dc78a1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:3d776b8da4217dede878e733991aec6462f1e8d19c905c8998ee2d50849a73a0 +size 5529 diff --git a/data/2025/2503_12xxx/2503.12972/images/70c509381366e918387c50f30aab692b2a319db199d55fff823b7322e85e7c0d.jpg b/data/2025/2503_12xxx/2503.12972/images/70c509381366e918387c50f30aab692b2a319db199d55fff823b7322e85e7c0d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..549797aaf8f2e47020c8a3f4e666d4b2b9c02a5c --- /dev/null +++ b/data/2025/2503_12xxx/2503.12972/images/70c509381366e918387c50f30aab692b2a319db199d55fff823b7322e85e7c0d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f18232ebe5a24e4b4ee88b6f76e72f5b922e27095b4c0b624be6cd93b8007afe +size 36702 diff --git a/data/2025/2503_12xxx/2503.12972/images/a0c8d9c34fe5e9db284faa164d7f4985af9b1ae1e7b21f3d54728b3d6c9b0494.jpg b/data/2025/2503_12xxx/2503.12972/images/a0c8d9c34fe5e9db284faa164d7f4985af9b1ae1e7b21f3d54728b3d6c9b0494.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6cec2c7711170c4e1ac4503634d4c11ea1552d7c --- /dev/null +++ b/data/2025/2503_12xxx/2503.12972/images/a0c8d9c34fe5e9db284faa164d7f4985af9b1ae1e7b21f3d54728b3d6c9b0494.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9e9e83628ac490567cc207ed222d5bfc311f76f0fb7e63d91a2666bdd02eeba3 +size 29435 diff --git a/data/2025/2503_12xxx/2503.12972/images/a3b9193854505da3f69bfbf1b89c3b890690e66f382f92dbb2246b00c8c73cc4.jpg b/data/2025/2503_12xxx/2503.12972/images/a3b9193854505da3f69bfbf1b89c3b890690e66f382f92dbb2246b00c8c73cc4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..638df25ae1da35ec07bc993b72a11eb9c606e3a2 --- /dev/null +++ b/data/2025/2503_12xxx/2503.12972/images/a3b9193854505da3f69bfbf1b89c3b890690e66f382f92dbb2246b00c8c73cc4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:31e2ced21d25b87b8d6eef1323477fd798afe573ecb75ab7e1e1a9ceba6f5c3d +size 88400 diff --git a/data/2025/2503_12xxx/2503.12972/images/b3d612618801180dba35a284f64b34eea05de762b170c942e03ec9dd5a4b8bdd.jpg b/data/2025/2503_12xxx/2503.12972/images/b3d612618801180dba35a284f64b34eea05de762b170c942e03ec9dd5a4b8bdd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..88ad23bd9199a93f04e7960a5eaf9c59fc8b78a8 --- /dev/null +++ b/data/2025/2503_12xxx/2503.12972/images/b3d612618801180dba35a284f64b34eea05de762b170c942e03ec9dd5a4b8bdd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4456105a61705b1de13278584adbd95b0cc4f07f98657aa131262f1d14dddb1e +size 23285 diff --git a/data/2025/2503_12xxx/2503.12972/images/b7c15acd35fb4773bd57aa71f9b7d1854155539b0a304cfacd0f1f8f156ffa59.jpg b/data/2025/2503_12xxx/2503.12972/images/b7c15acd35fb4773bd57aa71f9b7d1854155539b0a304cfacd0f1f8f156ffa59.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a50e79fd27ba6ddfdd515801b4a34ee4baf9dbaf --- /dev/null +++ b/data/2025/2503_12xxx/2503.12972/images/b7c15acd35fb4773bd57aa71f9b7d1854155539b0a304cfacd0f1f8f156ffa59.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e14b4fc0b4ce4e43598cab9575cc524a101549a4de178f138877cf6376e504cf +size 3875 diff --git a/data/2025/2503_12xxx/2503.12972/images/d0ca33cc3a3e03e4e8ea5575cfe6a04a4fc3f8aae7966ec38050d582e149390d.jpg b/data/2025/2503_12xxx/2503.12972/images/d0ca33cc3a3e03e4e8ea5575cfe6a04a4fc3f8aae7966ec38050d582e149390d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..344a6d7ae9079b7ab6873268664fc5b1cd57871e --- /dev/null +++ 
b/data/2025/2503_12xxx/2503.12972/images/d0ca33cc3a3e03e4e8ea5575cfe6a04a4fc3f8aae7966ec38050d582e149390d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2230e29ef1cab92606f9d7b9ecfd92c333cf5c523c10f6e9f67a18b7335ebcf4 +size 7884 diff --git a/data/2025/2503_12xxx/2503.12972/images/daad939622d1365740ad713921e37ab22dcfbea1497d31a40eb87a78fb1f9c75.jpg b/data/2025/2503_12xxx/2503.12972/images/daad939622d1365740ad713921e37ab22dcfbea1497d31a40eb87a78fb1f9c75.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4338efc5af8eea8da656c9dc72aa899bab7d106a --- /dev/null +++ b/data/2025/2503_12xxx/2503.12972/images/daad939622d1365740ad713921e37ab22dcfbea1497d31a40eb87a78fb1f9c75.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:915341d82388ca62374dd132b8174ff619b99e4f6767855e5c0584354b2a0c42 +size 55216 diff --git a/data/2025/2503_12xxx/2503.12972/images/e5a8193224fd6a409dbb805babf77da1861be7852ca533ec2b2ea659d41fb770.jpg b/data/2025/2503_12xxx/2503.12972/images/e5a8193224fd6a409dbb805babf77da1861be7852ca533ec2b2ea659d41fb770.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bcf324f1b3c5575cbfd3347229b3789be613fe8d --- /dev/null +++ b/data/2025/2503_12xxx/2503.12972/images/e5a8193224fd6a409dbb805babf77da1861be7852ca533ec2b2ea659d41fb770.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:96c073fb7d4914274d7e38dfc34a87bdf5954555329becf82b7e6d217228bb56 +size 4646 diff --git a/data/2025/2503_12xxx/2503.12972/images/f2b3f86b15576932ab020991c6fe949147864e448d956a2befbbe1e596ebc116.jpg b/data/2025/2503_12xxx/2503.12972/images/f2b3f86b15576932ab020991c6fe949147864e448d956a2befbbe1e596ebc116.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2e5ee5889930a221c9ac809e4f72b0c9511c27fb --- /dev/null +++ b/data/2025/2503_12xxx/2503.12972/images/f2b3f86b15576932ab020991c6fe949147864e448d956a2befbbe1e596ebc116.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0158eb98ead5cf3cec337239f7a0ab9593e23a740730021d8d584553373574ad +size 5937 diff --git a/data/2025/2503_12xxx/2503.12972/layout.json b/data/2025/2503_12xxx/2503.12972/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..2c30b55b4fd5b6dfa9e6ad954501a92aa98f972d --- /dev/null +++ b/data/2025/2503_12xxx/2503.12972/layout.json @@ -0,0 +1,12294 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 65, + 102, + 545, + 140 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 102, + 545, + 140 + ], + "spans": [ + { + "bbox": [ + 65, + 102, + 545, + 140 + ], + "type": "text", + "content": "Aligning Vision to Language: Annotation-Free Multimodal Knowledge Graph Construction for Enhanced LLMs Reasoning" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 125, + 160, + 484, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 125, + 160, + 484, + 175 + ], + "spans": [ + { + "bbox": [ + 125, + 160, + 484, + 175 + ], + "type": "text", + "content": "Junming Liu" + }, + { + "bbox": [ + 125, + 160, + 484, + 175 + ], + "type": "inline_equation", + "content": "^{1,2}" + }, + { + "bbox": [ + 125, + 160, + 484, + 175 + ], + "type": "text", + "content": ", Siyuan Meng" + }, + { + "bbox": [ + 125, + 160, + 484, + 175 + ], + "type": "inline_equation", + "content": "^{2,3}" + }, + { + "bbox": [ + 125, + 160, + 484, + 175 + ], + "type": "text", + "content": ", Yanting Gao" + }, + { + "bbox": [ + 125, + 160, + 484, + 175 + ], + "type": 
"inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 125, + 160, + 484, + 175 + ], + "type": "text", + "content": ", Song Mao" + }, + { + "bbox": [ + 125, + 160, + 484, + 175 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 125, + 160, + 484, + 175 + ], + "type": "text", + "content": ", Pinlong Cai" + }, + { + "bbox": [ + 125, + 160, + 484, + 175 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 125, + 160, + 484, + 175 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 136, + 175, + 474, + 190 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 136, + 175, + 474, + 190 + ], + "spans": [ + { + "bbox": [ + 136, + 175, + 474, + 190 + ], + "type": "text", + "content": "Guohang Yan" + }, + { + "bbox": [ + 136, + 175, + 474, + 190 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 136, + 175, + 474, + 190 + ], + "type": "text", + "content": ", Yirong Chen" + }, + { + "bbox": [ + 136, + 175, + 474, + 190 + ], + "type": "inline_equation", + "content": "^{2,4}" + }, + { + "bbox": [ + 136, + 175, + 474, + 190 + ], + "type": "text", + "content": ", Zilin Bian" + }, + { + "bbox": [ + 136, + 175, + 474, + 190 + ], + "type": "inline_equation", + "content": "^{5}" + }, + { + "bbox": [ + 136, + 175, + 474, + 190 + ], + "type": "text", + "content": ", Ding Wang" + }, + { + "bbox": [ + 136, + 175, + 474, + 190 + ], + "type": "inline_equation", + "content": "^{2*}" + }, + { + "bbox": [ + 136, + 175, + 474, + 190 + ], + "type": "text", + "content": ", Botian Shi" + }, + { + "bbox": [ + 136, + 175, + 474, + 190 + ], + "type": "inline_equation", + "content": "^{2}" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 149, + 190, + 462, + 204 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 190, + 462, + 204 + ], + "spans": [ + { + "bbox": [ + 149, + 190, + 462, + 204 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 149, + 190, + 462, + 204 + ], + "type": "text", + "content": "Tongji University " + }, + { + "bbox": [ + 149, + 190, + 462, + 204 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 149, + 190, + 462, + 204 + ], + "type": "text", + "content": "Shanghai Artificial Intelligence Laboratory" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 115, + 203, + 495, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 203, + 495, + 217 + ], + "spans": [ + { + "bbox": [ + 115, + 203, + 495, + 217 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 115, + 203, + 495, + 217 + ], + "type": "text", + "content": "East China Normal University " + }, + { + "bbox": [ + 115, + 203, + 495, + 217 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 115, + 203, + 495, + 217 + ], + "type": "text", + "content": "Stanford University " + }, + { + "bbox": [ + 115, + 203, + 495, + 217 + ], + "type": "inline_equation", + "content": "^{5}" + }, + { + "bbox": [ + 115, + 203, + 495, + 217 + ], + "type": "text", + "content": "New York University" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 162, + 220, + 441, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 162, + 220, + 441, + 232 + ], + "spans": [ + { + "bbox": [ + 162, + 220, + 441, + 232 + ], + "type": "text", + "content": "liu_junming6917@tongji.edu.cn wangding@pjlab.org.cn" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 151, + 258, + 200, 
+ 271 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 151, + 258, + 200, + 271 + ], + "spans": [ + { + "bbox": [ + 151, + 258, + 200, + 271 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 54, + 283, + 296, + 606 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 283, + 296, + 606 + ], + "spans": [ + { + "bbox": [ + 54, + 283, + 296, + 606 + ], + "type": "text", + "content": "Multimodal reasoning in Large Language Models (LLMs) struggles with incomplete knowledge and hallucination artifacts, challenges that textual Knowledge Graphs (KGs) only partially mitigate due to their modality isolation. While Multimodal Knowledge Graphs (MMKGs) promise enhanced cross-modal understanding, their practical construction is impeded by semantic narrowness of manual text annotations and inherent noise in visual-semantic entity linkages. In this paper, we propose Vision-align-to-Language integrated Knowledge Graph (VaLiK), a novel approach for constructing MMKGs that enhances LLMs reasoning through cross-modal information supplementation. Specifically, we cascade pre-trained Vision-Language Models (VLMs) to align image features with text, transforming them into descriptions that encapsulate image-specific information. Furthermore, we developed a cross-modal similarity verification mechanism to quantify semantic consistency, effectively filtering out noise introduced during feature alignment. Even without manually annotated image captions, the refined descriptions alone suffice to construct the MMKG. Compared to conventional MMKGs construction paradigms, our approach achieves substantial storage efficiency gains while maintaining direct entity-to-image linkage capability. Experimental results on multimodal reasoning tasks demonstrate that LLMs augmented with VaLiK outperform previous state-of-the-art models. Our code is published at https://github.com/Wings-Of-Disaster/VaLiK." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 628, + 135, + 640 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 628, + 135, + 640 + ], + "spans": [ + { + "bbox": [ + 56, + 628, + 135, + 640 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 55, + 648, + 296, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 648, + 296, + 696 + ], + "spans": [ + { + "bbox": [ + 55, + 648, + 296, + 696 + ], + "type": "text", + "content": "Recent advancements in Large Language Models (LLMs) [2, 10, 26, 66] have demonstrated their superiority and versatility across various Natural Language Reasoning (NLR) tasks [9, 44, 54, 59]. To enhance LLMs into the" + } + ] + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 317, + 259, + 552, + 543 + ], + "blocks": [ + { + "bbox": [ + 317, + 259, + 552, + 543 + ], + "lines": [ + { + "bbox": [ + 317, + 259, + 552, + 543 + ], + "spans": [ + { + "bbox": [ + 317, + 259, + 552, + 543 + ], + "type": "image", + "image_path": "4f0401946432d33d9c1cf582ceba501170762e9bc5154f160d35a6b7809d9e45.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 553, + 555, + 597 + ], + "lines": [ + { + "bbox": [ + 313, + 553, + 555, + 597 + ], + "spans": [ + { + "bbox": [ + 313, + 553, + 555, + 597 + ], + "type": "text", + "content": "Figure 1. (a) Training entity extraction models relies on extensive fine-grained annotations, increasing labeling costs. 
More examples are provided in Appendix B. (b) Capturing implicit semantic associations demands abstract comprehension or logical inference." + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 617, + 555, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 617, + 555, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 617, + 555, + 713 + ], + "type": "text", + "content": "realm of multimodal reasoning, researchers [65, 72, 75, 80] have endeavored to equip these models with multimodal capabilities, as evidenced by advancements in Multimodal Large Language Models (MLLMs) such as BLIP-2 [41], GPT-4o [33], Janus-Pro [14], among others. Despite their notable progress, these models often experience hallucinations [5, 35], primarily arising from knowledge deficiencies due to incomplete or obsolete information." + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 217, + 35, + 574 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 217, + 35, + 574 + ], + "spans": [ + { + "bbox": [ + 14, + 217, + 35, + 574 + ], + "type": "text", + "content": "arXiv:2503.12972v3 [cs.CV] 21 Nov 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 703, + 144, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 703, + 144, + 712 + ], + "spans": [ + { + "bbox": [ + 67, + 703, + 144, + 712 + ], + "type": "text", + "content": "*Corresponding author." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 294, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 294, + 167 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 294, + 167 + ], + "type": "text", + "content": "Fine-tuning LLMs demands prohibitive computational costs [32]. While text-based Knowledge Graphs (KGs) have partially addressed this limitation by efficient real-time updates [6, 63, 73], they are still restricted by modal isolation, which hinders cross-modal reasoning, as detailed in Appendix A. To bridge this semantic fragmentation, Multimodal Knowledge Graphs (MMKGs) have been developed as unified representational frameworks [11, 34, 39, 46]." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 170, + 294, + 278 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 170, + 294, + 278 + ], + "spans": [ + { + "bbox": [ + 55, + 170, + 294, + 278 + ], + "type": "text", + "content": "However, constructing robust MMKGs faces two primary obstacles [16, 90]. First, the lack of large-scale fine-grained entity-image corpora makes it infeasible to train high-quality entity extractors, significantly constraining scalability, as illustrated in Figure 1a. Second, conventional visual relation detectors primarily identify superficial spatial interactions instead of semantic relations consistent with KGs, while frequently hallucinating implausible connections that corrupt graph integrity, as shown in Figure 1b." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 281, + 295, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 281, + 295, + 628 + ], + "spans": [ + { + "bbox": [ + 55, + 281, + 295, + 628 + ], + "type": "text", + "content": "In this paper, we propose VaLiK, short for Vision-align-to-Language integrated Knowledge Graph, a novel framework designed to empower LLMs with advanced multimodal reasoning. Unlike traditional methods that rely on text annotations for training extraction models and the knowledge construction process [55], VaLiK adopts a annotation-free approach to MMKGs construction. Specifically, we first employ several pretrained Vision-Language models (VLMs), designed based on Chain-of-Experts (CoE) principles [74], to convert visual inputs into image-specific textual descriptions through cross-modal feature alignment. This procedure eliminates the need for manually annotated image captions in both the knowledge extraction and construction phases while preserving visual details typically missing in generic text descriptions. Moreover, in contrast to existing relation detection methods that require predefined label taxonomies [17, 61, 82, 85], VaLiK excels at extracting profound semantic relationships that are both KG-compatible and capture novel associations beyond training supervision. While VLMs enable cross-modal reasoning and interpretation, they introduce spurious relational noise through hallucinated inter-modal attributions, as depicted in Figure 2. We address this limitation through cross-modal similarity recalibration, strategically filtering inconsistent information while preserving valid semantic correspondences. Finally, the purified descriptions are systematically organized into MMKGs via LLM-driven symbolic structuring [28], bridging visual and textual domains with factual consistency." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 630, + 294, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 630, + 294, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 630, + 294, + 713 + ], + "type": "text", + "content": "To thoroughly evaluate the VaLiK method, we conduct a comprehensive assessment across two critical multimodal benchmarks: multimodal classification (tested on the CrisisMMD dataset [3]) and multimodal question answering (evaluated via the ScienceQA benchmark [48]). The experiments span diverse LLM architectures and MMKG construction techniques to ensure the framework's robustness." + } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 317, + 73, + 552, + 208 + ], + "blocks": [ + { + "bbox": [ + 317, + 73, + 552, + 208 + ], + "lines": [ + { + "bbox": [ + 317, + 73, + 552, + 208 + ], + "spans": [ + { + "bbox": [ + 317, + 73, + 552, + 208 + ], + "type": "image", + "image_path": "0565a1818d28d5c065a7ad32bab646f9d5e0994ab7c5dbfd84d22b45087b7151.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 217, + 553, + 240 + ], + "lines": [ + { + "bbox": [ + 313, + 217, + 553, + 240 + ], + "spans": [ + { + "bbox": [ + 313, + 217, + 553, + 240 + ], + "type": "text", + "content": "Figure 2. Feature-aligned descriptions from VLMs introduce redundant and inaccurate relationship patterns." 
+ } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 313, + 251, + 553, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 251, + 553, + 323 + ], + "spans": [ + { + "bbox": [ + 313, + 251, + 553, + 323 + ], + "type": "text", + "content": "The experimental results demonstrate that the MMKGs constructed by VaLiK achieve superior multimodal reasoning performance in LLMs while requiring substantially less storage than conventional approaches. More importantly, the proposed approach retains direct entity-to-image linkage capabilities even with the compressed graph structure." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 324, + 553, + 372 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 324, + 553, + 372 + ], + "spans": [ + { + "bbox": [ + 313, + 324, + 553, + 372 + ], + "type": "text", + "content": "In summary, VaLiK is the first framework that enables end-to-end, annotation-free, zero-shot, and storage-efficient multimodal knowledge construction with high adaptability and scalability. Our key contributions include:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 314, + 373, + 553, + 577 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 314, + 373, + 553, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 373, + 553, + 445 + ], + "spans": [ + { + "bbox": [ + 314, + 373, + 553, + 445 + ], + "type": "text", + "content": "- To the best of our knowledge, VaLik is the first end-to-end framework to build Annotation-Free MMKGs to improve LLMs' multimodal reasoning capabilities, effectively eliminating the need for manually annotated textual material and enabling a completely autonomous multimodal knowledge generation process." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 314, + 445, + 553, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 445, + 553, + 517 + ], + "spans": [ + { + "bbox": [ + 314, + 445, + 553, + 517 + ], + "type": "text", + "content": "- We offer an innovative zero-shot method for constructing MMKG that captures deep semantic connections beyond traditional predetermined labels with an effective verification system that guarantees the accuracy of these relationships. The knowledge distillation paradigm greatly decreases storage while maintaining semantic integrity." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 314, + 517, + 553, + 577 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 517, + 553, + 577 + ], + "spans": [ + { + "bbox": [ + 314, + 517, + 553, + 577 + ], + "type": "text", + "content": "- We develop a highly modular and extensible architecture that allows VaLiK to effortlessly incorporate new models and workflows for specialized domain tasks, facilitating rapid adaptation to diverse application scenarios without incurring expensive system changes." + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 314, + 590, + 400, + 602 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 590, + 400, + 602 + ], + "spans": [ + { + "bbox": [ + 314, + 590, + 400, + 602 + ], + "type": "text", + "content": "2. 
Related Work" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 611, + 485, + 624 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 611, + 485, + 624 + ], + "spans": [ + { + "bbox": [ + 313, + 611, + 485, + 624 + ], + "type": "text", + "content": "2.1. Multimodal Knowledge Graphs" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 629, + 553, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 629, + 553, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 629, + 553, + 713 + ], + "type": "text", + "content": "The principal advantage of MMKGs resides in their multimodal integration beyond conventional KGs. By linking entities with corresponding visual or textual data, MMKGs introduce valuable visual and textual information to the knowledge base, substantially advancing multimodal reasoning capabilities. This combination addresses core challenges in tasks that inherently demand multimodal synergy" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 294, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 294, + 300 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 294, + 300 + ], + "type": "text", + "content": "like autonomous driving [27, 29], image-text retrieval [24, 87] and robotic manipulation [52, 58]. However, constructing trustworthy MMKGs with minimal manual effort remains a critical challenge. Recent studies have proposed innovative strategies to enhance MMKG reliability and utility. For instance, Chen et al. [13] proposed MSPT, a framework addressing continual MMKG construction through gradient modulation for balanced multimodal learning and attention distillation to mitigate catastrophic forgetting. Song et al. [61] developed Scene-MMKG, integrating knowledge engineering with large language models to improve robotic manipulation by resolving data sparsity and knowledge uncertainty. Wang et al. [70] introduced TIVA-KG, the first quad-modal knowledge graph spanning text, image, video, and audio with triplet grounding, empirically validating its effectiveness in downstream tasks. While these advances enhance multimodal reasoning capabilities, their efficacy remains rooted in resource-intensive paradigms, requiring extensively annotated datasets for knowledge acquisition." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 308, + 293, + 322 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 308, + 293, + 322 + ], + "spans": [ + { + "bbox": [ + 55, + 308, + 293, + 322 + ], + "type": "text", + "content": "2.2. Knowledge-Augmented Multimodal Learning" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 327, + 295, + 579 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 327, + 295, + 579 + ], + "spans": [ + { + "bbox": [ + 55, + 327, + 295, + 579 + ], + "type": "text", + "content": "Multimodal learning has seen significant progress in aligning and integrating information across different data modalities [7, 45, 76]. 
The incorporation of structured knowledge through MMKGs further enhances these approaches, improving the reasoning capabilities and generalization across a variety of domains, such as visual question answering [51, 60, 68], recommendation systems [18, 62, 71], and classification [31, 56, 84]. Methods like GraphAdapter's dual-KG adaptation [42] and contrastive multi-relational encoding with KGs [23] inject external knowledge into models, refining their performance and improving their capability to handle complex tasks. Additionally, Lee et al. [39] proposed MR-MKG, a novel framework that constructs task-specific MMKGs to enhance multimodal reasoning in LLMs. These knowledge-augmented paradigms demonstrate superior cross-modal semantic grounding compared to unimodal approaches [15, 36]. However, their reliance on preconstructed MMKGs often leads to domain discrepancies, where generic knowledge schemas misalign with task-specific reasoning patterns, ultimately limiting contextual precision in target applications." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 587, + 251, + 601 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 587, + 251, + 601 + ], + "spans": [ + { + "bbox": [ + 55, + 587, + 251, + 601 + ], + "type": "text", + "content": "2.3. Multimodal Large Language Models" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 605, + 295, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 605, + 295, + 715 + ], + "spans": [ + { + "bbox": [ + 55, + 605, + 295, + 715 + ], + "type": "text", + "content": "The limitations of text-only LLMs in meeting increasingly complex demands have spurred extensive research [79, 83, 86] into developing LLMs capable of effectively processing and reasoning over multimodal inputs. Current research predominantly employs adapter or projection layers to connect the embedding spaces of various modality-specific encoders with the textual embedding space of LLMs [39]. For instance, foundational models like CLIP [57] and BLIP [40] pioneered cross-modal alignment by jointly training vision" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 313, + 72, + 555, + 253 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 555, + 253 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 555, + 253 + ], + "type": "text", + "content": "and text encoders to map images and text into a shared embedding space. Building on this, LLaVA [43] and Flamingo [4] advanced the field by integrating visual encoders with LLMs, enabling more nuanced multimodal understanding and generation. More recently, Gemini [64], Qwen2-VL [69] and GPT-4o [33] have further pushed the boundaries by scaling up multimodal pretraining and introducing sophisticated mechanisms for cross-modal interaction. However, multimodal LLMs remain prone to hallucinations. While they enhance cross-modal alignment, they neither acquire new knowledge nor avoid introducing noise through integration. To address these limitations, VaLiK \"uses the master's tools to refine the master's craft,\" first constructing MMKGs via MLLMs and then leveraging them to enhance MLLMs' reasoning capabilities." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 313, + 263, + 370, + 275 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 263, + 370, + 275 + ], + "spans": [ + { + "bbox": [ + 313, + 263, + 370, + 275 + ], + "type": "text", + "content": "3. 
Method" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 283, + 555, + 428 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 283, + 555, + 428 + ], + "spans": [ + { + "bbox": [ + 313, + 283, + 555, + 428 + ], + "type": "text", + "content": "In this section, we present the technical details of VaLiK. VaLiK introduces a novel expansion-reduction paradigm for visual knowledge extraction. The architecture initially organizes several VLMs with distinct knowledge domains, designed based on CoE principles [74], to produce comprehensive textual descriptions encompassing hierarchical visual details. A cross-modal similarity verification mechanism then iteratively filters out noisy tokens through cross-modal alignment while preserving semantically salient elements. This optimization-style approach eliminates external textual dependencies while enabling effective MMKG construction. VaLiK's framework is shown in Figure 3." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 435, + 529, + 449 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 435, + 529, + 449 + ], + "spans": [ + { + "bbox": [ + 313, + 435, + 529, + 449 + ], + "type": "text", + "content": "3.1. CoE-based Visual to Language Modeling" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 453, + 554, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 453, + 554, + 548 + ], + "spans": [ + { + "bbox": [ + 313, + 453, + 554, + 548 + ], + "type": "text", + "content": "Recent entity detection techniques [20, 81, 91] have been widely adopted for entity and relation extraction in MMKG construction. However, these methods are inherently limited by predefined categorical boundaries, lacking the capacity to recognize visual concepts outside their training vocabulary. In contrast, VLMs pretrained on web-scale corpora [12, 41, 89] exhibit broader recognition capabilities through exposure to diverse visual concepts." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 548, + 554, + 609 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 548, + 554, + 609 + ], + "spans": [ + { + "bbox": [ + 313, + 548, + 554, + 609 + ], + "type": "text", + "content": "We therefore leverage pretrained VLMs to extract comprehensive visual information. This process removes the necessity for detailed fine-grained data typically required to train specialized recognition models. 
The generalized vision to language conversion pipeline can be formalized as:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 383, + 616, + 555, + 637 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 383, + 616, + 555, + 637 + ], + "spans": [ + { + "bbox": [ + 383, + 616, + 555, + 637 + ], + "type": "interline_equation", + "content": "S = \\mathcal {D} _ {\\text {t e x t}} \\left(\\mathcal {A} \\left(\\mathcal {E} _ {\\text {v i s}} (I)\\right)\\right), \\tag {1}", + "image_path": "1f7f23d221607565287eca865410c8391f1d00a48c77b2056efcc5e12a9feee2.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 641, + 554, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 641, + 554, + 714 + ], + "spans": [ + { + "bbox": [ + 313, + 641, + 554, + 714 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 313, + 641, + 554, + 714 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 313, + 641, + 554, + 714 + ], + "type": "text", + "content": " denotes for the input image, " + }, + { + "bbox": [ + 313, + 641, + 554, + 714 + ], + "type": "inline_equation", + "content": "\\mathcal{E}_{\\mathrm{vis}}" + }, + { + "bbox": [ + 313, + 641, + 554, + 714 + ], + "type": "text", + "content": " denotes the visual encoder extracting visual features, " + }, + { + "bbox": [ + 313, + 641, + 554, + 714 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 313, + 641, + 554, + 714 + ], + "type": "text", + "content": " carries out cross-modal feature alignment and interaction, and " + }, + { + "bbox": [ + 313, + 641, + 554, + 714 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_{\\mathrm{text}}" + }, + { + "bbox": [ + 313, + 641, + 554, + 714 + ], + "type": "text", + "content": " generates textual tokens through autoregressive decoding. The resulting visual description " + }, + { + "bbox": [ + 313, + 641, + 554, + 714 + ], + "type": "inline_equation", + "content": "S = \\{w_{1},\\dots,w_{n}\\}" + }, + { + "bbox": [ + 313, + 641, + 554, + 714 + ], + "type": "text", + "content": " emerges from this multi-stage processing." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 57, + 71, + 553, + 238 + ], + "blocks": [ + { + "bbox": [ + 57, + 71, + 553, + 238 + ], + "lines": [ + { + "bbox": [ + 57, + 71, + 553, + 238 + ], + "spans": [ + { + "bbox": [ + 57, + 71, + 553, + 238 + ], + "type": "image", + "image_path": "3ae9a45583be9946e86dc7de188a71f381a9113b43215237e1f028a2f67cfac2.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 54, + 244, + 555, + 281 + ], + "lines": [ + { + "bbox": [ + 54, + 244, + 555, + 281 + ], + "spans": [ + { + "bbox": [ + 54, + 244, + 555, + 281 + ], + "type": "text", + "content": "Figure 3. The pipeline of VaLiK: First, large-scale visual descriptions are generated using CoE-based VLMs. Then, a similarity verification mechanism is used to prune irrelevant information. Finally, MMKGs are constructed using LLMs based on LightRAG. 
The constructed MMKGs can assist LLMs in multimodal reasoning, alleviating the hallucination issues caused by incomplete knowledge." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 54, + 288, + 296, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 288, + 296, + 407 + ], + "spans": [ + { + "bbox": [ + 54, + 288, + 296, + 407 + ], + "type": "text", + "content": "However, quantitative analysis uncovers considerable discrepancies between machine-generated and human-annotated descriptions [88]. As an illustration, while utilizing BLIP-2 [41] to generate sample captions, we noted that the model outputs are markedly concise and devoid of visual specifics, as detailed in Appendix C. To bridge this gap, we implement CoE enhanced generation through cascade VLMs processing. At iteration step " + }, + { + "bbox": [ + 54, + 288, + 296, + 407 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 54, + 288, + 296, + 407 + ], + "type": "text", + "content": ", each expert " + }, + { + "bbox": [ + 54, + 288, + 296, + 407 + ], + "type": "inline_equation", + "content": "E_{i}" + }, + { + "bbox": [ + 54, + 288, + 296, + 407 + ], + "type": "text", + "content": " receives both the original visual signals " + }, + { + "bbox": [ + 54, + 288, + 296, + 407 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 54, + 288, + 296, + 407 + ], + "type": "text", + "content": " and the contextual output from the preceding expert " + }, + { + "bbox": [ + 54, + 288, + 296, + 407 + ], + "type": "inline_equation", + "content": "E_{i - 1}" + }, + { + "bbox": [ + 54, + 288, + 296, + 407 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 126, + 417, + 295, + 439 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 126, + 417, + 295, + 439 + ], + "spans": [ + { + "bbox": [ + 126, + 417, + 295, + 439 + ], + "type": "interline_equation", + "content": "\\mathcal {S} _ {i} ^ {(t)} = E _ {i} \\left(I, \\mathcal {S} _ {i - 1} ^ {(t - 1)}\\right), \\tag {2}", + "image_path": "3062818bfab71156811bf5d45f1a407fa2df4f06a513f4ad9ca2606598e0e311.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 448, + 295, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 448, + 295, + 475 + ], + "spans": [ + { + "bbox": [ + 55, + 448, + 295, + 475 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 55, + 448, + 295, + 475 + ], + "type": "inline_equation", + "content": "S_{i - 1}^{(t - 1)}" + }, + { + "bbox": [ + 55, + 448, + 295, + 475 + ], + "type": "text", + "content": " denotes the description from expert " + }, + { + "bbox": [ + 55, + 448, + 295, + 475 + ], + "type": "inline_equation", + "content": "E_{i - 1}" + }, + { + "bbox": [ + 55, + 448, + 295, + 475 + ], + "type": "text", + "content": " at step " + }, + { + "bbox": [ + 55, + 448, + 295, + 475 + ], + "type": "inline_equation", + "content": "t - 1" + }, + { + "bbox": [ + 55, + 448, + 295, + 475 + ], + "type": "text", + "content": ", with " + }, + { + "bbox": [ + 55, + 448, + 295, + 475 + ], + "type": "inline_equation", + "content": "S_0^{(t)}\\coloneqq \\emptyset" + }, + { + "bbox": [ + 55, + 448, + 295, + 475 + ], + "type": "text", + "content": " for initialization." 
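The cascaded recurrence of Eq. (2) reduces to a small loop. Below is a minimal sketch, assuming each expert (e.g., BLIP-2, LLaVA, or Qwen2-VL) has already been wrapped as a callable taking the image and the previous expert's description; the `Expert` signature, the `coe_describe` name, and the commented wiring are illustrative assumptions, not the paper's released code.

```python
from typing import Callable, List, Optional

# An "expert" maps (image, previous description) -> refined description.
# In VaLiK's setting this would wrap a pretrained VLM such as BLIP-2,
# LLaVA, or Qwen2-VL; the wrapper itself is assumed for illustration.
Expert = Callable[[object, Optional[str]], str]

def coe_describe(image, experts: List[Expert], steps: int = 1) -> str:
    """Chain-of-Experts captioning, Eq. (2): S_i = E_i(I, S_{i-1}).

    S_0 is initialized to the empty description; `steps` repeats the
    whole cascade C times so later experts refine earlier output.
    """
    description: Optional[str] = None  # S_0 := empty
    for _ in range(steps):
        for expert in experts:
            description = expert(image, description)
    return description or ""

# Hypothetical wiring: each lambda feeds the previous description back
# to the VLM as a prompt, so the next expert enriches rather than restarts.
# experts = [lambda img, prev: blip2_caption(img, prompt=prev),
#            lambda img, prev: llava_caption(img, prompt=prev)]
# final_description = coe_describe(image, experts, steps=C)
```

Keeping the image as a fixed input at every step, rather than chaining text alone, is what lets each expert ground its refinement in the original visual signal.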
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 476, + 295, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 476, + 295, + 502 + ], + "spans": [ + { + "bbox": [ + 55, + 476, + 295, + 502 + ], + "type": "text", + "content": "Specifically, each expert " + }, + { + "bbox": [ + 55, + 476, + 295, + 502 + ], + "type": "inline_equation", + "content": "E_{i}" + }, + { + "bbox": [ + 55, + 476, + 295, + 502 + ], + "type": "text", + "content": " implements a unified visual-language processing task:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 503, + 185, + 514 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 503, + 185, + 514 + ], + "spans": [ + { + "bbox": [ + 55, + 503, + 185, + 514 + ], + "type": "text", + "content": "1. Visual Feature Extraction:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 125, + 524, + 295, + 539 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 125, + 524, + 295, + 539 + ], + "spans": [ + { + "bbox": [ + 125, + 524, + 295, + 539 + ], + "type": "interline_equation", + "content": "\\mathbf {V} _ {i} = \\operatorname {E n c} _ {\\text {v i s}} ^ {i} (I) \\in \\mathbb {R} ^ {d _ {v} \\times N _ {p}}, \\tag {3}", + "image_path": "41fbae9956bdbe831d0208c015c2756237ce428c21b43c97f3a94b8e57eae94b.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 548, + 295, + 573 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 548, + 295, + 573 + ], + "spans": [ + { + "bbox": [ + 67, + 548, + 295, + 573 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 67, + 548, + 295, + 573 + ], + "type": "inline_equation", + "content": "\\mathsf{Enc}_{\\mathrm{vis}}^i" + }, + { + "bbox": [ + 67, + 548, + 295, + 573 + ], + "type": "text", + "content": " denotes established visual encoder [21, 30, 47] producing " + }, + { + "bbox": [ + 67, + 548, + 295, + 573 + ], + "type": "inline_equation", + "content": "N_{p}" + }, + { + "bbox": [ + 67, + 548, + 295, + 573 + ], + "type": "text", + "content": " patch embeddings with dimension " + }, + { + "bbox": [ + 67, + 548, + 295, + 573 + ], + "type": "inline_equation", + "content": "d_v" + }, + { + "bbox": [ + 67, + 548, + 295, + 573 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 55, + 574, + 249, + 585 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 574, + 249, + 585 + ], + "spans": [ + { + "bbox": [ + 55, + 574, + 249, + 585 + ], + "type": "text", + "content": "2. 
Cross-Modal Interaction and Generation:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 586, + 295, + 623 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 586, + 295, + 623 + ], + "spans": [ + { + "bbox": [ + 67, + 586, + 295, + 623 + ], + "type": "text", + "content": "VLMs integrate pretrained learnable query embeddings " + }, + { + "bbox": [ + 67, + 586, + 295, + 623 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}_i\\in \\mathbb{R}^{d_q\\times L_q}" + }, + { + "bbox": [ + 67, + 586, + 295, + 623 + ], + "type": "text", + "content": " to interact with visual features " + }, + { + "bbox": [ + 67, + 586, + 295, + 623 + ], + "type": "inline_equation", + "content": "\\mathbf{V}_i\\in" + }, + { + "bbox": [ + 67, + 586, + 295, + 623 + ], + "type": "inline_equation", + "content": "\\mathbb{R}^{d_v\\times N_p}" + }, + { + "bbox": [ + 67, + 586, + 295, + 623 + ], + "type": "text", + "content": " via cross-attention [67]:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 83, + 631, + 295, + 680 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 631, + 295, + 680 + ], + "spans": [ + { + "bbox": [ + 83, + 631, + 295, + 680 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathbf {H} _ {i} = \\operatorname {C r o s s A t t n} \\left(\\mathbf {Q} _ {i}, \\mathbf {V} _ {i}\\right) \\\\ = \\operatorname {s o f t m a x} \\left(\\frac {\\mathbf {Q} _ {i} \\mathbf {W} _ {q} ^ {i} \\left(\\mathbf {V} _ {i} \\mathbf {W} _ {k} ^ {i}\\right) ^ {\\top}}{\\sqrt {d _ {k}}}\\right) \\mathbf {V} _ {i} \\mathbf {W} _ {v} ^ {i}, \\tag {4} \\\\ \\end{array}", + "image_path": "09ce5c827fe27b873450b6d2c1fba26985bd7235d4c6e26d0aa70b1fdb91b127.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 67, + 688, + 296, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 688, + 296, + 714 + ], + "spans": [ + { + "bbox": [ + 67, + 688, + 296, + 714 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 67, + 688, + 296, + 714 + ], + "type": "inline_equation", + "content": "\\mathbf{W}_q^i\\in \\mathbb{R}^{d_q\\times d_k}" + }, + { + "bbox": [ + 67, + 688, + 296, + 714 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 67, + 688, + 296, + 714 + ], + "type": "inline_equation", + "content": "\\mathbf{W}_k^i" + }, + { + "bbox": [ + 67, + 688, + 296, + 714 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 67, + 688, + 296, + 714 + ], + "type": "inline_equation", + "content": "\\mathbf{W}_v^i\\in \\mathbb{R}^{d_v\\times d_k}" + }, + { + "bbox": [ + 67, + 688, + 296, + 714 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 67, + 688, + 296, + 714 + ], + "type": "inline_equation", + "content": "L_{q}" + }, + { + "bbox": [ + 67, + 688, + 296, + 714 + ], + "type": "text", + "content": " denotes the predefined query length. Cross-attention serves" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 325, + 288, + 553, + 324 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 325, + 288, + 553, + 324 + ], + "spans": [ + { + "bbox": [ + 325, + 288, + 553, + 324 + ], + "type": "text", + "content": "as a prevalent approach, while other interaction strategies coexist [4]. The adopted VLMs in our implementation primarily rely on this approach for modality fusion." 
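As a concrete reference for Eq. (4), here is a self-contained NumPy sketch of single-head cross-attention between learnable queries and visual patch features. It uses a rows-as-tokens layout (the transpose of the column convention in the equation), and the toy dimensions are illustrative only.

```python
import numpy as np

def softmax(x, axis=-1):
    x = x - x.max(axis=axis, keepdims=True)  # subtract max for stability
    e = np.exp(x)
    return e / e.sum(axis=axis, keepdims=True)

def cross_attention(Q, V, Wq, Wk, Wv):
    """Single-head cross-attention, Eq. (4).

    Q : (L_q, d_q) learnable query embeddings
    V : (N_p, d_v) visual patch features
    Wq: (d_q, d_k); Wk, Wv: (d_v, d_k) projection matrices
    Returns H: (L_q, d_k), queries enriched with visual content.
    """
    d_k = Wq.shape[1]
    scores = (Q @ Wq) @ (V @ Wk).T / np.sqrt(d_k)  # (L_q, N_p) affinities
    return softmax(scores, axis=-1) @ (V @ Wv)     # weighted visual values

# Toy shapes only; real VLMs use far larger dimensions.
rng = np.random.default_rng(0)
L_q, N_p, d_q, d_v, d_k = 32, 196, 768, 1024, 64
H = cross_attention(rng.normal(size=(L_q, d_q)), rng.normal(size=(N_p, d_v)),
                    rng.normal(size=(d_q, d_k)), rng.normal(size=(d_v, d_k)),
                    rng.normal(size=(d_v, d_k)))
print(H.shape)  # (32, 64)
```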
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 314, + 326, + 403, + 336 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 326, + 403, + 336 + ], + "spans": [ + { + "bbox": [ + 314, + 326, + 403, + 336 + ], + "type": "text", + "content": "3. Text Generation:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 325, + 336, + 553, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 325, + 336, + 553, + 365 + ], + "spans": [ + { + "bbox": [ + 325, + 336, + 553, + 365 + ], + "type": "text", + "content": "The text encoder " + }, + { + "bbox": [ + 325, + 336, + 553, + 365 + ], + "type": "inline_equation", + "content": "\\mathsf{Enc}_{\\mathrm{text}}^{i}" + }, + { + "bbox": [ + 325, + 336, + 553, + 365 + ], + "type": "text", + "content": " first processes the preceding expert's output " + }, + { + "bbox": [ + 325, + 336, + 553, + 365 + ], + "type": "inline_equation", + "content": "S_{i - 1}^{(t - 1)}" + }, + { + "bbox": [ + 325, + 336, + 553, + 365 + ], + "type": "text", + "content": " into latent features:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 375, + 374, + 553, + 390 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 375, + 374, + 553, + 390 + ], + "spans": [ + { + "bbox": [ + 375, + 374, + 553, + 390 + ], + "type": "interline_equation", + "content": "\\mathbf {P} _ {i} = \\operatorname {E n c} _ {\\text {t e x t}} ^ {i} \\left(S _ {i - 1} ^ {(t - 1)}\\right) \\in \\mathbb {R} ^ {d _ {t} \\times L}. \\tag {5}", + "image_path": "e5a8193224fd6a409dbb805babf77da1861be7852ca533ec2b2ea659d41fb770.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 326, + 401, + 553, + 428 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 401, + 553, + 428 + ], + "spans": [ + { + "bbox": [ + 326, + 401, + 553, + 428 + ], + "type": "text", + "content": "Subsequently, the text decoder " + }, + { + "bbox": [ + 326, + 401, + 553, + 428 + ], + "type": "inline_equation", + "content": "\\mathrm{Dec}_{\\mathrm{text}}^{i}" + }, + { + "bbox": [ + 326, + 401, + 553, + 428 + ], + "type": "text", + "content": " synthesizes the final output " + }, + { + "bbox": [ + 326, + 401, + 553, + 428 + ], + "type": "inline_equation", + "content": "S_{i}^{(t)}" + }, + { + "bbox": [ + 326, + 401, + 553, + 428 + ], + "type": "text", + "content": " by jointly conditioning on " + }, + { + "bbox": [ + 326, + 401, + 553, + 428 + ], + "type": "inline_equation", + "content": "\\mathbf{P}_i" + }, + { + "bbox": [ + 326, + 401, + 553, + 428 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 326, + 401, + 553, + 428 + ], + "type": "inline_equation", + "content": "\\mathbf{H}_i" + }, + { + "bbox": [ + 326, + 401, + 553, + 428 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 339, + 447, + 553, + 464 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 339, + 447, + 553, + 464 + ], + "spans": [ + { + "bbox": [ + 339, + 447, + 553, + 464 + ], + "type": "interline_equation", + "content": "\\mathcal {S} _ {i} ^ {(t)} = \\operatorname {D e c} _ {\\text {t e x t}} ^ {i} \\left(\\mathbf {P} _ {i}, \\mathbf {H} _ {i}\\right) = \\left\\{w _ {1} ^ {(t, i)}, \\dots , w _ {m} ^ {(t, i)} \\right\\}. 
\\tag {6}", + "image_path": "f2b3f86b15576932ab020991c6fe949147864e448d956a2befbbe1e596ebc116.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 313, + 473, + 553, + 499 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 473, + 553, + 499 + ], + "spans": [ + { + "bbox": [ + 313, + 473, + 553, + 499 + ], + "type": "text", + "content": "Ultimately, the final textual description " + }, + { + "bbox": [ + 313, + 473, + 553, + 499 + ], + "type": "inline_equation", + "content": "S_N^{(C)}" + }, + { + "bbox": [ + 313, + 473, + 553, + 499 + ], + "type": "text", + "content": " is obtained after " + }, + { + "bbox": [ + 313, + 473, + 553, + 499 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 313, + 473, + 553, + 499 + ], + "type": "text", + "content": " iteration steps through " + }, + { + "bbox": [ + 313, + 473, + 553, + 499 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 313, + 473, + 553, + 499 + ], + "type": "text", + "content": " cascaded experts." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 313, + 507, + 504, + 520 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 507, + 504, + 520 + ], + "spans": [ + { + "bbox": [ + 313, + 507, + 504, + 520 + ], + "type": "text", + "content": "3.2. Cross-Modal Similarity Verification" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 313, + 525, + 555, + 609 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 525, + 555, + 609 + ], + "spans": [ + { + "bbox": [ + 313, + 525, + 555, + 609 + ], + "type": "text", + "content": "To address noise in VLM-generated captions, we design a sliding window mechanism with semantic consistency verification. This method ensures that only relevant and semantically consistent segments are retained in the final description. Let " + }, + { + "bbox": [ + 313, + 525, + 555, + 609 + ], + "type": "inline_equation", + "content": "W_{k}" + }, + { + "bbox": [ + 313, + 525, + 555, + 609 + ], + "type": "text", + "content": " denote the " + }, + { + "bbox": [ + 313, + 525, + 555, + 609 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 313, + 525, + 555, + 609 + ], + "type": "text", + "content": "-th window containing " + }, + { + "bbox": [ + 313, + 525, + 555, + 609 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 313, + 525, + 555, + 609 + ], + "type": "text", + "content": " consecutive tokens " + }, + { + "bbox": [ + 313, + 525, + 555, + 609 + ], + "type": "inline_equation", + "content": "\\{w_{km + 1},\\dots ,w_{(k + 1)m}\\}" + }, + { + "bbox": [ + 313, + 525, + 555, + 609 + ], + "type": "text", + "content": ". 
For each window, we compute its cross-modal similarity score:" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 364, + 617, + 553, + 645 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 364, + 617, + 553, + 645 + ], + "spans": [ + { + "bbox": [ + 364, + 617, + 553, + 645 + ], + "type": "interline_equation", + "content": "\\alpha_ {k} = \\frac {\\operatorname {E n c} _ {\\text {v i s}} (I) \\cdot \\operatorname {E n c} _ {\\text {t e x t}} \\left(W _ {k}\\right)}{\\| \\operatorname {E n c} _ {\\text {v i s}} (I) \\| \\| \\operatorname {E n c} _ {\\text {t e x t}} \\left(W _ {k}\\right) \\|}, \\tag {7}", + "image_path": "d0ca33cc3a3e03e4e8ea5575cfe6a04a4fc3f8aae7966ec38050d582e149390d.jpg" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 313, + 653, + 555, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 653, + 555, + 712 + ], + "spans": [ + { + "bbox": [ + 313, + 653, + 555, + 712 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 313, + 653, + 555, + 712 + ], + "type": "inline_equation", + "content": "\\mathsf{Enc}_{vis/text}(\\cdot)" + }, + { + "bbox": [ + 313, + 653, + 555, + 712 + ], + "type": "text", + "content": " adopts a lightweight CLIP [59] encoder-decoder with frozen parameters for efficient processing. The similarity score " + }, + { + "bbox": [ + 313, + 653, + 555, + 712 + ], + "type": "inline_equation", + "content": "\\alpha_{k}" + }, + { + "bbox": [ + 313, + 653, + 555, + 712 + ], + "type": "text", + "content": " lies within the range [0, 1], with higher values indicating a stronger alignment between the visual and textual information." + } + ] + } + ], + "index": 23 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 296, + 216 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 296, + 216 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 296, + 216 + ], + "type": "text", + "content": "After calculating the cross-modal similarity for each window, we employ an empirical threshold " + }, + { + "bbox": [ + 55, + 72, + 296, + 216 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 55, + 72, + 296, + 216 + ], + "type": "text", + "content": " to filter out low-similarity windows. This threshold helps to identify and discard noisy or irrelevant sections of the generated caption that do not align well with the visual content, thereby reducing the impact of inaccurate or misleading descriptions. Formally, for each window " + }, + { + "bbox": [ + 55, + 72, + 296, + 216 + ], + "type": "inline_equation", + "content": "W_{k}" + }, + { + "bbox": [ + 55, + 72, + 296, + 216 + ], + "type": "text", + "content": ", if " + }, + { + "bbox": [ + 55, + 72, + 296, + 216 + ], + "type": "inline_equation", + "content": "\\alpha_{k} < \\tau" + }, + { + "bbox": [ + 55, + 72, + 296, + 216 + ], + "type": "text", + "content": ", the window is discarded as noise. This process effectively prunes windows with low similarity scores, ensuring that only semantically meaningful segments remain. 
The final denoised description " + }, + { + "bbox": [ + 55, + 72, + 296, + 216 + ], + "type": "inline_equation", + "content": "\\hat{S}" + }, + { + "bbox": [ + 55, + 72, + 296, + 216 + ], + "type": "text", + "content": " is obtained by concatenating all windows " + }, + { + "bbox": [ + 55, + 72, + 296, + 216 + ], + "type": "inline_equation", + "content": "W_{k}" + }, + { + "bbox": [ + 55, + 72, + 296, + 216 + ], + "type": "text", + "content": " for which " + }, + { + "bbox": [ + 55, + 72, + 296, + 216 + ], + "type": "inline_equation", + "content": "\\alpha_{k} \\geq \\tau" + }, + { + "bbox": [ + 55, + 72, + 296, + 216 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 143, + 221, + 296, + 248 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 221, + 296, + 248 + ], + "spans": [ + { + "bbox": [ + 143, + 221, + 296, + 248 + ], + "type": "interline_equation", + "content": "\\hat {S} = \\bigcup_ {\\alpha_ {k} \\geq \\tau} W _ {k}. \\tag {8}", + "image_path": "53b3c1ef64a002a98e518980197c31d8968d68df2609a7612ca99b1b774dfcdb.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 254, + 296, + 278 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 254, + 296, + 278 + ], + "spans": [ + { + "bbox": [ + 55, + 254, + 296, + 278 + ], + "type": "text", + "content": "Our window size " + }, + { + "bbox": [ + 55, + 254, + 296, + 278 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 55, + 254, + 296, + 278 + ], + "type": "text", + "content": " is flexibly determined and generally adapts dynamically to natural sentence segmentation." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 284, + 296, + 297 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 284, + 296, + 297 + ], + "spans": [ + { + "bbox": [ + 55, + 284, + 296, + 297 + ], + "type": "text", + "content": "3.3. MMKG Construction for Enhanced Reasoning" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 301, + 296, + 409 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 301, + 296, + 409 + ], + "spans": [ + { + "bbox": [ + 55, + 301, + 296, + 409 + ], + "type": "text", + "content": "LLMs have become increasingly popular for identifying entities, relationships, and attributes within a corpus, which are then organized into a KG. The strength of LLM-based KG generation lies in its capacity to leverage the vast amount of knowledge encoded within these models, allowing them to detect complex and nuanced patterns across diverse data sources. This approach eliminates the need for manual annotation, enabling a highly scalable and domain-adaptive process suitable for a wide range of applications." 
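Before graph construction, the verification mechanism of Eqs. (7)-(8) in Section 3.2 is compact enough to sketch end to end. The sketch assumes sentence-sized windows (matching the dynamic segmentation described above) and takes a precomputed CLIP-style image embedding plus an injected `encode_text` callable; both stand in for the frozen CLIP encoders and are assumptions for illustration, as is the threshold default.

```python
import re
import numpy as np

def verify_and_prune(image_emb: np.ndarray, description: str,
                     encode_text, tau: float = 0.25) -> str:
    """Sliding-window pruning, Eqs. (7)-(8).

    `image_emb` is a frozen CLIP image embedding of I; `encode_text`
    maps a text window into the same embedding space. Windows are
    whole sentences here, approximating the paper's dynamic
    segmentation; the default tau mirrors the CrisisMMD setting
    reported in Section 4.1.
    """
    windows = [w for w in re.split(r"(?<=[.!?])\s+", description) if w]
    kept = []
    for window in windows:
        text_emb = encode_text(window)
        alpha = float(image_emb @ text_emb /
                      (np.linalg.norm(image_emb) * np.linalg.norm(text_emb)))
        if alpha >= tau:          # windows with alpha_k < tau are noise
            kept.append(window)
    return " ".join(kept)         # S_hat: concatenation of kept windows
```

Because both encoders stay frozen, a full pass costs one image encoding plus one text encoding per window, which is what keeps verification lightweight.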
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 410, + 296, + 493 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 410, + 296, + 493 + ], + "spans": [ + { + "bbox": [ + 55, + 410, + 296, + 493 + ], + "type": "text", + "content": "We begin by refining the generated textual description " + }, + { + "bbox": [ + 55, + 410, + 296, + 493 + ], + "type": "inline_equation", + "content": "\\hat{S}" + }, + { + "bbox": [ + 55, + 410, + 296, + 493 + ], + "type": "text", + "content": " (VLM-based information), which is then optionally concatenated with any available external textual knowledge " + }, + { + "bbox": [ + 55, + 410, + 296, + 493 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 55, + 410, + 296, + 493 + ], + "type": "text", + "content": " to form the input for KG generation. This combined input is used to generate MMKGs with the help of a LLM [22, 28], leveraging its capacity for multi-hop reasoning and dynamic knowledge integration." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 501, + 295, + 521 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 501, + 295, + 521 + ], + "spans": [ + { + "bbox": [ + 130, + 501, + 295, + 521 + ], + "type": "interline_equation", + "content": "\\mathcal {G} = \\operatorname {L L M} (\\hat {S} \\oplus T), \\tag {9}", + "image_path": "b7c15acd35fb4773bd57aa71f9b7d1854155539b0a304cfacd0f1f8f156ffa59.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 523, + 295, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 523, + 295, + 559 + ], + "spans": [ + { + "bbox": [ + 55, + 523, + 295, + 559 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 55, + 523, + 295, + 559 + ], + "type": "inline_equation", + "content": "\\oplus" + }, + { + "bbox": [ + 55, + 523, + 295, + 559 + ], + "type": "text", + "content": " denotes optional concatenation based on the availability of " + }, + { + "bbox": [ + 55, + 523, + 295, + 559 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 55, + 523, + 295, + 559 + ], + "type": "text", + "content": ". The resulting graph " + }, + { + "bbox": [ + 55, + 523, + 295, + 559 + ], + "type": "inline_equation", + "content": "\\mathcal{G}" + }, + { + "bbox": [ + 55, + 523, + 295, + 559 + ], + "type": "text", + "content": " captures both visual and textual relationships inferred by the LLM." 
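To make the construction step concrete, here is a minimal sketch around Eq. (9), storing the graph as the (h, r, t) triplets formalized just below, with a naive keyword-overlap stand-in for the Retrieve(·) and prompt-augmentation steps of Eqs. (11)-(12). The `llm(prompt) -> str` callable and the pipe-delimited output format are hypothetical; the actual system delegates extraction and retrieval to the LightRAG pipeline.

```python
from typing import Callable, Set, Tuple

Triple = Tuple[str, str, str]  # (head, relation, tail)

EXTRACT_PROMPT = ("Extract knowledge triplets from the passage.\n"
                  "One per line, formatted as: head | relation | tail\n\n{text}")

def build_graph(s_hat: str, llm: Callable[[str], str],
                external_text: str = "") -> Set[Triple]:
    """Eq. (9): G = LLM(S_hat (+) T), parsed into triplets."""
    source = s_hat + ("\n" + external_text if external_text else "")
    graph: Set[Triple] = set()
    for line in llm(EXTRACT_PROMPT.format(text=source)).splitlines():
        parts = [p.strip() for p in line.split("|")]
        if len(parts) == 3 and all(parts):
            graph.add((parts[0], parts[1], parts[2]))
    return graph

def augment_prompt(query: str, graph: Set[Triple]) -> str:
    """Eqs. (11)-(12): retrieve overlapping triplets, then append them
    to the query as [h] -> r -> [t] evidence lines."""
    terms = set(query.lower().split())
    hits = [t for t in graph
            if terms & set((t[0] + " " + t[2]).lower().split())]
    evidence = "\n".join(f"[{h}] -> {r} -> [{t}]" for h, r, t in hits)
    return f"{query}\n\nRetrieved knowledge:\n{evidence}" if evidence else query
```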
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 559, + 195, + 571 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 559, + 195, + 571 + ], + "spans": [ + { + "bbox": [ + 67, + 559, + 195, + 571 + ], + "type": "text", + "content": "We define " + }, + { + "bbox": [ + 67, + 559, + 195, + 571 + ], + "type": "inline_equation", + "content": "\\mathcal{G}" + }, + { + "bbox": [ + 67, + 559, + 195, + 571 + ], + "type": "text", + "content": " as a set of triplets:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 578, + 295, + 592 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 578, + 295, + 592 + ], + "spans": [ + { + "bbox": [ + 107, + 578, + 295, + 592 + ], + "type": "interline_equation", + "content": "\\mathcal {G} = \\{(h, r, t) \\mid h, t \\in \\mathcal {E}, r \\in \\mathcal {R} \\}, \\tag {10}", + "image_path": "484545c159fe844bb510c12ebc2cd4c6f052d098b4a136c90ac20cca5181cd7d.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 55, + 597, + 296, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 597, + 296, + 658 + ], + "spans": [ + { + "bbox": [ + 55, + 597, + 296, + 658 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 55, + 597, + 296, + 658 + ], + "type": "inline_equation", + "content": "\\mathcal{E}" + }, + { + "bbox": [ + 55, + 597, + 296, + 658 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 55, + 597, + 296, + 658 + ], + "type": "inline_equation", + "content": "\\mathcal{R}" + }, + { + "bbox": [ + 55, + 597, + 296, + 658 + ], + "type": "text", + "content": " denote the sets of entities and relations. Entities include objects or concepts from the image or external text, while relations describe connections such as \"is a type of,\" \"part of,\" or \"has property.\" Each triplet " + }, + { + "bbox": [ + 55, + 597, + 296, + 658 + ], + "type": "inline_equation", + "content": "(h,r,t)" + }, + { + "bbox": [ + 55, + 597, + 296, + 658 + ], + "type": "text", + "content": " links a head entity " + }, + { + "bbox": [ + 55, + 597, + 296, + 658 + ], + "type": "inline_equation", + "content": "h" + }, + { + "bbox": [ + 55, + 597, + 296, + 658 + ], + "type": "text", + "content": " and a tail entity " + }, + { + "bbox": [ + 55, + 597, + 296, + 658 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 55, + 597, + 296, + 658 + ], + "type": "text", + "content": " via relation " + }, + { + "bbox": [ + 55, + 597, + 296, + 658 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 55, + 597, + 296, + 658 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 55, + 658, + 296, + 694 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 658, + 296, + 694 + ], + "spans": [ + { + "bbox": [ + 55, + 658, + 296, + 694 + ], + "type": "text", + "content": "Multimodal Reasoning Enhancement. 
To support multimodal reasoning, we retrieve relevant triplets from " + }, + { + "bbox": [ + 55, + 658, + 296, + 694 + ], + "type": "inline_equation", + "content": "\\mathcal{G}" + }, + { + "bbox": [ + 55, + 658, + 296, + 694 + ], + "type": "text", + "content": " through structural patterns during LLMs inference:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 132, + 700, + 295, + 715 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 700, + 295, + 715 + ], + "spans": [ + { + "bbox": [ + 132, + 700, + 295, + 715 + ], + "type": "interline_equation", + "content": "\\mathcal {G} _ {q} = \\operatorname {R e t r i e v e} (q, \\mathcal {G}), \\tag {11}", + "image_path": "53202978704d71375752ea29410832e57b68988c7b72a4f9edd34de6f0500125.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 72, + 553, + 108 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 553, + 108 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 553, + 108 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 313, + 72, + 553, + 108 + ], + "type": "inline_equation", + "content": "\\text{Retrieve}(\\cdot)" + }, + { + "bbox": [ + 313, + 72, + 553, + 108 + ], + "type": "text", + "content": " denotes a retrieval strategy that identifies subgraphs relevant to the query for reasoning. Detailed retrieval strategies are described in Appendix D." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 326, + 109, + 553, + 121 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 109, + 553, + 121 + ], + "spans": [ + { + "bbox": [ + 326, + 109, + 553, + 121 + ], + "type": "text", + "content": "The augmented prompt integrates multimodal evidence:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 361, + 131, + 553, + 159 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 361, + 131, + 553, + 159 + ], + "spans": [ + { + "bbox": [ + 361, + 131, + 553, + 159 + ], + "type": "interline_equation", + "content": "p _ {\\mathrm {a u g}} = q \\left\\|\\left(\\bigcup_ {(h, r, t) \\in \\mathcal {G} _ {q}} [ h ] \\rightarrow r \\rightarrow [ t ]\\right). \\right. \\tag {12}", + "image_path": "65280c81158ceeaf1600375296559fa3912dc5fb2ff19e0b0677dd1e25dc78a1.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 163, + 555, + 248 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 163, + 555, + 248 + ], + "spans": [ + { + "bbox": [ + 313, + 163, + 555, + 248 + ], + "type": "text", + "content": "Note that we incorporate the storage locations of images in the database during MMKGs construction, enabling the MMKGs to link to visual data. VaLiK enables text-only LLMs to perform multimodal reasoning through " + }, + { + "bbox": [ + 313, + 163, + 555, + 248 + ], + "type": "inline_equation", + "content": "\\mathcal{G}" + }, + { + "bbox": [ + 313, + 163, + 555, + 248 + ], + "type": "text", + "content": "'s visual associations, while VLMs refresh knowledge representations by jointly injecting both visual and textual information, significantly mitigating hallucination risks." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 259, + 390, + 272 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 259, + 390, + 272 + ], + "spans": [ + { + "bbox": [ + 313, + 259, + 390, + 272 + ], + "type": "text", + "content": "4. 
Experiment" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 313, + 279, + 368, + 293 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 279, + 368, + 293 + ], + "spans": [ + { + "bbox": [ + 313, + 279, + 368, + 293 + ], + "type": "text", + "content": "4.1. Setups" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 313, + 297, + 553, + 321 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 297, + 553, + 321 + ], + "spans": [ + { + "bbox": [ + 313, + 297, + 553, + 321 + ], + "type": "text", + "content": "Evaluation Datasets. We evaluate VaLiK on two multimodal reasoning benchmarks with distinct characteristics:" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 314, + 322, + 553, + 417 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 322, + 553, + 417 + ], + "spans": [ + { + "bbox": [ + 314, + 322, + 553, + 417 + ], + "type": "text", + "content": "- CrisisMMD [3]. This real-world disaster response dataset includes around 35,000 noisy social media postings with paired images and text, each annotated for seven catastrophe categories and four severity levels. Its realistic user-generated content with natural noise and implicit modality correlations provides a rigorous testbed for zero-shot adaptation, with good performance indicating practical relevance in real-world crisis scenarios." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 314, + 418, + 554, + 513 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 418, + 554, + 513 + ], + "spans": [ + { + "bbox": [ + 314, + 418, + 554, + 513 + ], + "type": "text", + "content": "- ScienceQA [48]. This dataset contains 21,208 multimodal science questions combining textual and visual contexts, with " + }, + { + "bbox": [ + 314, + 418, + 554, + 513 + ], + "type": "inline_equation", + "content": "48.7\\%" + }, + { + "bbox": [ + 314, + 418, + 554, + 513 + ], + "type": "text", + "content": " of instances containing images. Questions span physics, chemistry, and biology domains, requiring cross-modal reasoning between textual concepts and visual diagrams. Additionally, ScienceQA offers image captions to aid text-only LLMs in reasoning, allowing a comparison of unimodal approaches." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 313, + 514, + 555, + 633 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 514, + 555, + 633 + ], + "spans": [ + { + "bbox": [ + 313, + 514, + 555, + 633 + ], + "type": "text", + "content": "Task Formulation. For CrisisMMD, we define three multimodal classification tasks1: (1) binary information relevance filtering, (2) fine-grained humanitarian category recognition, and (3) a consolidated taxonomy with merged categories to reduce label complexity. We omit the unimodal damage assessment to focus on multimodal aspects. For ScienceQA, we follow the original evaluation using multiple metrics: question types, contextual modalities, and educational stages. Performance is assessed through accuracy percentage across these categories." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 313, + 633, + 553, + 669 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 633, + 553, + 669 + ], + "spans": [ + { + "bbox": [ + 313, + 633, + 553, + 669 + ], + "type": "text", + "content": "Baselines. We conduct a comprehensive evaluation of text-only LLMs, multimodal VLMs, and KGs that enhance LLMs in multimodal reasoning." 
+ } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 314, + 670, + 553, + 694 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 670, + 553, + 694 + ], + "spans": [ + { + "bbox": [ + 314, + 670, + 553, + 694 + ], + "type": "text", + "content": "- For CrisisMMD, we compare text-only LLMs using few-shot prompting (LLaMA-2 [66], GPT-4 [2]," + } + ] + } + ], + "index": 24 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 324, + 702, + 553, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 324, + 702, + 553, + 713 + ], + "spans": [ + { + "bbox": [ + 324, + 702, + 553, + 713 + ], + "type": "text", + "content": "This setting references the repository GitHub and Abavisani et al. [1]" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 26 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 58, + 70, + 553, + 144 + ], + "blocks": [ + { + "bbox": [ + 58, + 70, + 553, + 144 + ], + "lines": [ + { + "bbox": [ + 58, + 70, + 553, + 144 + ], + "spans": [ + { + "bbox": [ + 58, + 70, + 553, + 144 + ], + "type": "table", + "html": "
<table>
<tr><td rowspan="3">Task</td><td colspan="11">Text-only LLMs</td><td colspan="3">KG-Enhanced LLMs</td></tr>
<tr><td colspan="3">LLaMA-2</td><td>GPT-4</td><td colspan="4">DeepSeek-R1</td><td colspan="3">Qwen2.5</td><td>LightRAG</td><td colspan="2">VaLiK</td></tr>
<tr><td>7B</td><td>13B</td><td>70B</td><td>-</td><td>7B</td><td>8B</td><td>32B</td><td>70B</td><td>7B</td><td>32B</td><td>72B</td><td>Text-only</td><td>Image-only</td><td>Text-Image</td></tr>
<tr><td>Task 1</td><td>62.32</td><td>63.80</td><td>63.15</td><td>66.83</td><td>67.23</td><td>63.31</td><td>63.61</td><td>65.53</td><td>65.04</td><td>67.28</td><td>67.95</td><td>67.49</td><td>69.52</td><td>68.90</td></tr>
<tr><td>Task 2</td><td>18.32</td><td>21.82</td><td>28.87</td><td>47.25</td><td>26.53</td><td>25.49</td><td>24.77</td><td>21.05</td><td>44.52</td><td>46.94</td><td>50.51</td><td>45.11</td><td>49.54</td><td>50.02</td></tr>
<tr><td>Task 2 Merged</td><td>21.45</td><td>33.15</td><td>36.89</td><td>49.44</td><td>25.85</td><td>23.56</td><td>21.55</td><td>25.57</td><td>45.33</td><td>47.07</td><td>50.29</td><td>45.94</td><td>49.07</td><td>50.69</td></tr>
</table>
", + "image_path": "4afcd56b1fa4990fa61cf4b7cfa0d337feceab24bf56a82c3aa78cb2285b2360.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 58, + 195, + 563, + 277 + ], + "blocks": [ + { + "bbox": [ + 55, + 152, + 555, + 185 + ], + "lines": [ + { + "bbox": [ + 55, + 152, + 555, + 185 + ], + "spans": [ + { + "bbox": [ + 55, + 152, + 555, + 185 + ], + "type": "text", + "content": "Table 1. The performance evaluation of text-only LLMs using few-shot prompting without any fine-tuning on the training set. As these models handle text only, test data is formatted as unimodal text for compatibility. In our implementations, both LightRAG and VaLiK adopt Qwen2.5-7B as the base reasoning model. Bold indicates the highest value, and underline indicates the second highest." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 58, + 195, + 563, + 277 + ], + "lines": [ + { + "bbox": [ + 58, + 195, + 563, + 277 + ], + "spans": [ + { + "bbox": [ + 58, + 195, + 563, + 277 + ], + "type": "table", + "html": "
<table>
<tr><td rowspan="3">Task</td><td colspan="10">Multimodal VLMs</td><td colspan="4">KG-Enhanced LLMs</td></tr>
<tr><td>CLIP</td><td colspan="3">LLaVA</td><td colspan="2">BLIP-2</td><td>GPT-4o</td><td colspan="3">Qwen2-VL</td><td colspan="4">VaLiK</td></tr>
<tr><td>ViT-L/14</td><td>7B</td><td>13B</td><td>34B</td><td>Flan-T5-XL</td><td>OPT</td><td>-</td><td>2B-I</td><td>7B-I</td><td>72B-I</td><td>*</td><td>#</td><td>+</td><td>~</td></tr>
<tr><td>Task 1</td><td>43.36</td><td>54.00</td><td>60.58</td><td>56.44</td><td>61.29</td><td>38.62</td><td>68.20</td><td>47.56</td><td>62.45</td><td>65.80</td><td>60.78</td><td>68.44</td><td>61.11</td><td>68.89</td></tr>
<tr><td>Task 2</td><td>17.88</td><td>28.01</td><td>20.14</td><td>25.15</td><td>40.86</td><td>14.26</td><td>47.58</td><td>7.60</td><td>32.68</td><td>47.21</td><td>25.80</td><td>48.88</td><td>27.23</td><td>49.78</td></tr>
<tr><td>Task 2-M</td><td>20.79</td><td>30.61</td><td>23.44</td><td>25.07</td><td>40.72</td><td>14.27</td><td>49.55</td><td>7.42</td><td>34.20</td><td>48.28</td><td>27.31</td><td>49.27</td><td>29.09</td><td>49.31</td></tr>
</table>
", + "image_path": "daad939622d1365740ad713921e37ab22dcfbea1497d31a40eb87a78fb1f9c75.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 285, + 555, + 319 + ], + "lines": [ + { + "bbox": [ + 55, + 285, + 555, + 319 + ], + "spans": [ + { + "bbox": [ + 55, + 285, + 555, + 319 + ], + "type": "text", + "content": "Table 2. The performance of multimodal VLMs and KG-enhanced LLMs. The -I suffix denotes instruction-tuned variants. Symbol markers denote KG types and models: the asterisk (*) represents image-only KG with LLaVA-34B, hash (#) indicates image-only KG using Qwen2-VL-72B-I, plus (+) denotes text-image KG with LLaVA-34B, and tilde (\\*) shows text-image KG using Qwen2-VL-72B-I." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 63, + 339, + 294, + 374 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 339, + 294, + 374 + ], + "spans": [ + { + "bbox": [ + 63, + 339, + 294, + 374 + ], + "type": "text", + "content": "DeepSeek-R1 [26], Qwen-2.5 [77]) and multimodal VLMs (CLIP [57], LLaVA [43], GPT-4o [33], Qwen2-VL [69], BLIP-2 [41])." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 376, + 295, + 507 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 55, + 376, + 295, + 459 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 376, + 295, + 459 + ], + "spans": [ + { + "bbox": [ + 55, + 376, + 295, + 459 + ], + "type": "text", + "content": "- For ScienceQA, we compare models for general domains in zero/few-shot settings, including text-only LLMs (GPT Model [48], CoT [48], DDCoT [86]), multimodal VLMs (LG-VQA [25], LaVIN [50], BLIP-2, CCOT [53], GraphVis [19]) and Tool-LLM Chameleon [49]. These models are not specifically fine-tuned for scientific tasks, ensuring a fair evaluation of generalization capabilities." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 460, + 295, + 507 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 460, + 295, + 507 + ], + "spans": [ + { + "bbox": [ + 55, + 460, + 295, + 507 + ], + "type": "text", + "content": "- We further compare the multimodal reasoning performance of LLMs assisted by KGs, evaluating text-based KGs built with LightRAG [28], and pre-constructed MMKGs such as Visual Genome [38] and Mmkg [46]." + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 54, + 510, + 295, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 510, + 295, + 713 + ], + "spans": [ + { + "bbox": [ + 54, + 510, + 295, + 713 + ], + "type": "text", + "content": "Implementation. For MMKG construction, we design a chain of VLMs including BLIP-2, LLaVA, and Qwen2-VL, with the CLIP-ViT-L/14 for pruning. Stronger or additional VLMs could be employed to enhance performance if more computational resources are available. We use the entire training set as the knowledge base and construct MMKGs from the extracted descriptions based on the LightRAG framework. In comparative experiments, the LightRAG method we evaluate utilizes only textual data, while VaLiK employs two configurations: (1) fully image-generated text descriptions (Image-only), and (2) original text combined with image-generated text (Text-Image). Dynamic window partitioning based on sentence length ensures syntactically coherent pruning results. 
Similarity thresholds are set to " + }, + { + "bbox": [ + 54, + 510, + 295, + 713 + ], + "type": "inline_equation", + "content": "\\tau = 0.25" + }, + { + "bbox": [ + 54, + 510, + 295, + 713 + ], + "type": "text", + "content": " for CrisisMMD and " + }, + { + "bbox": [ + 54, + 510, + 295, + 713 + ], + "type": "inline_equation", + "content": "\\tau = 0.20" + }, + { + "bbox": [ + 54, + 510, + 295, + 713 + ], + "type": "text", + "content": " for ScienceQA based on empirical evaluations to balance precision and recall. See Appendix E for selection details. We construct the" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 339, + 555, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 339, + 555, + 399 + ], + "spans": [ + { + "bbox": [ + 313, + 339, + 555, + 399 + ], + "type": "text", + "content": "graph using DeepSeek-R1-70B and implement LightRAG's hybrid retrieval approach with Qwen2.5-7B. For graph construction and multimodal reasoning, we utilize " + }, + { + "bbox": [ + 313, + 339, + 555, + 399 + ], + "type": "inline_equation", + "content": "1 \\times" + }, + { + "bbox": [ + 313, + 339, + 555, + 399 + ], + "type": "text", + "content": " NVIDIA A100-80GB GPUs. Task-specific prompts are designed to assist LLMs in multimodal reasoning evaluation." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 407, + 400, + 419 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 407, + 400, + 419 + ], + "spans": [ + { + "bbox": [ + 313, + 407, + 400, + 419 + ], + "type": "text", + "content": "4.2. Main Results" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 426, + 555, + 653 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 426, + 555, + 653 + ], + "spans": [ + { + "bbox": [ + 313, + 426, + 555, + 653 + ], + "type": "text", + "content": "Multimodal Classification Tasks. We conduct multimodal classification experiments on the CrisisMMD dataset, evaluating both text-only LLMs and multimodal VLMs. Detailed comparative results are provided in Tables 1 and 2. For text-only LLMs, we adopt Qwen2.5-7B as the foundational reasoning model. Remarkably, the VaLiK-enhanced version achieves state-of-the-art (SOTA) performance matching that of the native Qwen2.5-72B model. The image-only KG constructed through VaLiK demonstrates an average accuracy improvement of " + }, + { + "bbox": [ + 313, + 426, + 555, + 653 + ], + "type": "inline_equation", + "content": "4.41\\%" + }, + { + "bbox": [ + 313, + 426, + 555, + 653 + ], + "type": "text", + "content": " across tasks, with the text-image variant attaining a " + }, + { + "bbox": [ + 313, + 426, + 555, + 653 + ], + "type": "inline_equation", + "content": "4.90\\%" + }, + { + "bbox": [ + 313, + 426, + 555, + 653 + ], + "type": "text", + "content": " enhancement. These improvements significantly surpass the " + }, + { + "bbox": [ + 313, + 426, + 555, + 653 + ], + "type": "inline_equation", + "content": "1.22\\%" + }, + { + "bbox": [ + 313, + 426, + 555, + 653 + ], + "type": "text", + "content": " gain obtained by LightRAG using textual KG. We further validate VaLiK's cross-scale applicability through evaluations on Qwen2.5-32B and 72B architectures, observing consistent " + }, + { + "bbox": [ + 313, + 426, + 555, + 653 + ], + "type": "inline_equation", + "content": "2.0\\% - 2.5\\%" + }, + { + "bbox": [ + 313, + 426, + 555, + 653 + ], + "type": "text", + "content": " improvements. 
While not as significant as the 7B model's benefits, this shows that models that have substantial prior knowledge benefit less from external knowledge augmentation" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 653, + 554, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 653, + 554, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 653, + 554, + 713 + ], + "type": "text", + "content": "Unlike text-only LLMs that depend on MMKGs for visual understanding, VLMs primarily benefit from KGs integration through outdated knowledge refreshment. Due to the inherent availability of visual features during inference, VaLiK's performance gains for VLMs remain con" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 733, + 309, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 733, + 309, + 742 + ], + "spans": [ + { + "bbox": [ + 302, + 733, + 309, + 742 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 58, + 70, + 553, + 338 + ], + "blocks": [ + { + "bbox": [ + 58, + 70, + 553, + 338 + ], + "lines": [ + { + "bbox": [ + 58, + 70, + 553, + 338 + ], + "spans": [ + { + "bbox": [ + 58, + 70, + 553, + 338 + ], + "type": "table", + "html": "
<table>
<tr><td rowspan="2">Method</td><td rowspan="2">#T-Param</td><td colspan="3">Subject</td><td colspan="3">Context Modality</td><td colspan="2">Grade</td><td rowspan="2">Average</td></tr>
<tr><td>NAT</td><td>SOC</td><td>LAN</td><td>TXT</td><td>IMG</td><td>NO</td><td>G1-6</td><td>G7-12</td></tr>
<tr><td>Human [48]</td><td>-</td><td>90.23</td><td>84.97</td><td>87.48</td><td>89.60</td><td>87.50</td><td>88.10</td><td>91.59</td><td>82.42</td><td>88.40</td></tr>
<tr><td>GPT-4 [43]</td><td>-</td><td>84.06</td><td>73.45</td><td>87.36</td><td>81.87</td><td>70.75</td><td>90.73</td><td>84.69</td><td>79.10</td><td>82.69</td></tr>
<tr><td>CoT (GPT-3) [48]</td><td>173B</td><td>75.44</td><td>70.87</td><td>78.09</td><td>74.68</td><td>67.43</td><td>79.93</td><td>78.23</td><td>69.68</td><td>75.17</td></tr>
<tr><td>CoT (UnifiedQA) [48]</td><td>223M</td><td>71.00</td><td>76.04</td><td>78.91</td><td>66.42</td><td>66.53</td><td>81.81</td><td>77.06</td><td>68.82</td><td>74.11</td></tr>
<tr><td>CoT (GPT-4) [49]</td><td>1T+</td><td>85.48</td><td>72.44</td><td>90.27</td><td>82.65</td><td>71.49</td><td>92.89</td><td>86.66</td><td>79.04</td><td>83.99</td></tr>
<tr><td>DDCoT [86]</td><td>175B</td><td>80.15</td><td>76.72</td><td>82.82</td><td>78.89</td><td>72.53</td><td>85.02</td><td>82.86</td><td>75.21</td><td>80.15</td></tr>
<tr><td>Chameleon (ChatGPT) [49]</td><td>175B+</td><td>81.62</td><td>70.64</td><td>84.00</td><td>79.77</td><td>70.80</td><td>86.62</td><td>81.86</td><td>76.53</td><td>79.93</td></tr>
<tr><td>LG-VQA (BLIP-2) [25]</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>86.32</td></tr>
<tr><td>LaVIN-13B [78]</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>77.54</td></tr>
<tr><td>BLIP-2 [78]</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>74.17</td></tr>
<tr><td>CCOT</td><td>7B</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>76.84</td></tr>
<tr><td>GraphVis [19]</td><td>7B</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>73.18</td></tr>
<tr><td>Qwen2.5-7B</td><td>7B</td><td>76.20</td><td>67.83</td><td>77.27</td><td>74.49</td><td>65.79</td><td>79.02</td><td>77.72</td><td>69.35</td><td>74.72</td></tr>
<tr><td>Qwen2.5-72B</td><td>72B</td><td>79.64</td><td>67.10</td><td>84.90</td><td>77.56</td><td>65.00</td><td>87.93</td><td>80.25</td><td>74.85</td><td>78.37</td></tr>
<tr><td>Qwen2.5-7B (Mmkg) [46]</td><td>7B</td><td>73.98</td><td>66.37</td><td>78.18</td><td>71.65</td><td>64.30</td><td>79.65</td><td>76.51</td><td>68.03</td><td>73.47</td></tr>
<tr><td>Qwen2.5-7B (Visual Genome) [38]</td><td>7B</td><td>76.78</td><td>67.04</td><td>78.09</td><td>74.05</td><td>66.19</td><td>79.72</td><td>78.08</td><td>69.68</td><td>75.08</td></tr>
<tr><td>Qwen2.5-7B (VaLiK Text-only)</td><td>7B</td><td>84.54</td><td>74.24</td><td>86.91</td><td>82.74</td><td>72.53</td><td>90.03</td><td>84.51</td><td>80.28</td><td>82.98</td></tr>
<tr><td>Qwen2.5-7B (VaLiK Image-only)</td><td>7B</td><td>79.14</td><td>71.54</td><td>79.27</td><td>77.16</td><td>69.72</td><td>83.14</td><td>80.65</td><td>73.96</td><td>78.88</td></tr>
<tr><td>Qwen2.5-7B (VaLiK Text-Image)</td><td>7B</td><td>84.15</td><td>75.14</td><td>87.64</td><td>82.99</td><td>73.18</td><td>89.69</td><td>84.40</td><td>80.95</td><td>83.16</td></tr>
<tr><td>Qwen2.5-72B (VaLiK Text-Image)</td><td>72B</td><td>85.61</td><td>75.93</td><td>90.27</td><td>84.40</td><td>74.17</td><td>92.33</td><td>85.79</td><td>82.98</td><td>84.77</td></tr>
</table>
", + "image_path": "4ed5640ca4dff132eb932d23112990edf9118e03775355bded4484c067665b93.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 346, + 555, + 392 + ], + "lines": [ + { + "bbox": [ + 55, + 346, + 555, + 392 + ], + "spans": [ + { + "bbox": [ + 55, + 346, + 555, + 392 + ], + "type": "text", + "content": "Table 3. Performance comparison (\\%) on ScienceQA benchmark. #T-Params denotes trainable parameters. Categories: NAT (natural science), SOC (social science), LAN (language), TXT (text context), IMG-Cap (image caption), NO (no context), G1-6 (grades 1-6), G7-12 (grades 7-12). Method groups: (1) Human performance baseline, (2) Zero/Few-shot text-only LLMs, (3) Zero/Few-shot Multimodal VLMs, (4) LLMs enhanced with knowledge graphs for multimodal reasoning." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 54, + 412, + 294, + 579 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 412, + 294, + 579 + ], + "spans": [ + { + "bbox": [ + 54, + 412, + 294, + 579 + ], + "type": "text", + "content": "strained compared to text-only counterparts. We separately applied VaLiK enhancement to Qwen2-VL-72B-Instruct and LLaVA-34B, obtaining distinct improvements: LLaVA-34B achieves accuracy gains of " + }, + { + "bbox": [ + 54, + 412, + 294, + 579 + ], + "type": "inline_equation", + "content": "2.41\\%" + }, + { + "bbox": [ + 54, + 412, + 294, + 579 + ], + "type": "text", + "content": " (image-only KG) and " + }, + { + "bbox": [ + 54, + 412, + 294, + 579 + ], + "type": "inline_equation", + "content": "3.59\\%" + }, + { + "bbox": [ + 54, + 412, + 294, + 579 + ], + "type": "text", + "content": " (text-image KG), while Qwen2-VL-72B-Instruct shows " + }, + { + "bbox": [ + 54, + 412, + 294, + 579 + ], + "type": "inline_equation", + "content": "1.77\\%" + }, + { + "bbox": [ + 54, + 412, + 294, + 579 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 54, + 412, + 294, + 579 + ], + "type": "inline_equation", + "content": "2.23\\%" + }, + { + "bbox": [ + 54, + 412, + 294, + 579 + ], + "type": "text", + "content": " improvements respectively under identical configurations. These experimental findings collectively demonstrate that VaLiK effectively extracts valuable signals from the training corpus and enables dynamic knowledge injection into VLMs during inference, thereby substantially alleviating hallucination phenomena. The differential improvements between Qwen2-VL-72B-Instruct and LLaVA-34B further validate the framework's adaptability across model architectures." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 580, + 295, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 580, + 295, + 700 + ], + "spans": [ + { + "bbox": [ + 55, + 580, + 295, + 700 + ], + "type": "text", + "content": "Additionally, we analyze the results of LLMs without KG enhancement in the tables, which generally follow the scaling law [37]. However, DeepSeek-R1 shows anomalous behavior. Through testing, we find that its reasoning process may introduce complex information that interferes with its judgment. Furthermore, empirical results show that most baseline models achieve suboptimal performance without fine-tuning. In contrast, VaLiK's automated MMKG construction framework requires no task-specific adaptation yet delivers consistent improvements." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 701, + 294, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 701, + 294, + 713 + ], + "spans": [ + { + "bbox": [ + 67, + 701, + 294, + 713 + ], + "type": "text", + "content": "Multimodal Question Answering Tasks. We evalu" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 313, + 412, + 553, + 532 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 412, + 553, + 532 + ], + "spans": [ + { + "bbox": [ + 313, + 412, + 553, + 532 + ], + "type": "text", + "content": "ated multimodal QA performance on the ScienceQA benchmark with Qwen2.5-7B and Qwen2.5-72B as base architectures, augmented by four knowledge sources: Mmkg, Visual Genome, text-only LightRAG and VaLiK. Compared to existing zero-shot/few-shot LLMs that not specifically optimized for scientific QA, our VaLiK-enhanced Qwen2.5-72B achieved SOTA performance on " + }, + { + "bbox": [ + 313, + 412, + 553, + 532 + ], + "type": "inline_equation", + "content": "62.5\\%" + }, + { + "bbox": [ + 313, + 412, + 553, + 532 + ], + "type": "text", + "content": " of subtasks, demonstrating particular strengths in multimodal reasoning scenarios requiring cross-modal alignment with an average accuracy gain of " + }, + { + "bbox": [ + 313, + 412, + 553, + 532 + ], + "type": "inline_equation", + "content": "6.4\\%" + }, + { + "bbox": [ + 313, + 412, + 553, + 532 + ], + "type": "text", + "content": " over baseline models." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 313, + 533, + 555, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 533, + 555, + 689 + ], + "spans": [ + { + "bbox": [ + 313, + 533, + 555, + 689 + ], + "type": "text", + "content": "Our study identifies a fundamental imbalance between textual and visual knowledge representations in ScienceQA. Text-only KGs (14k entities, 18k relations) exhibit " + }, + { + "bbox": [ + 313, + 533, + 555, + 689 + ], + "type": "inline_equation", + "content": "8 \\times" + }, + { + "bbox": [ + 313, + 533, + 555, + 689 + ], + "type": "text", + "content": " denser structured knowledge than image-only counterparts (3k concepts, 1k relations), explaining visual modality underperformance. Despite this gap, vision-KG-augmented Qwen2.5-7B still attains " + }, + { + "bbox": [ + 313, + 533, + 555, + 689 + ], + "type": "inline_equation", + "content": "4.16\\%" + }, + { + "bbox": [ + 313, + 533, + 555, + 689 + ], + "type": "text", + "content": " accuracy gains over its non-enhanced version. Notably, our MMKG requires only 489MB storage for complete storage, while the scene graph component2 of Visual Genome alone occupies 739MB. This lightweight construction enables effective reasoning using only textual KG descriptions without raw images in resource-constrained scenarios." 
+ } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 325, + 702, + 382, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 325, + 702, + 382, + 712 + ], + "spans": [ + { + "bbox": [ + 325, + 702, + 382, + 712 + ], + "type": "text", + "content": "2Visual Genome" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 58, + 70, + 555, + 152 + ], + "blocks": [ + { + "bbox": [ + 58, + 70, + 555, + 152 + ], + "lines": [ + { + "bbox": [ + 58, + 70, + 555, + 152 + ], + "spans": [ + { + "bbox": [ + 58, + 70, + 555, + 152 + ], + "type": "table", + "html": "
<table>
<tr><td rowspan="2">Type</td><td rowspan="2">Method</td><td rowspan="2">#T-Param</td><td colspan="3">Subject</td><td colspan="3">Context Modality</td><td colspan="2">Grade</td><td rowspan="2">Average</td></tr>
<tr><td>NAT</td><td>SOC</td><td>LAN</td><td>TXT</td><td>IMG</td><td>NO</td><td>G1-6</td><td>G7-12</td></tr>
<tr><td rowspan="3">Image-Only</td><td>Qwen2.5-7B</td><td>7B</td><td>76.20</td><td>67.83</td><td>77.27</td><td>74.49</td><td>65.79</td><td>79.02</td><td>77.72</td><td>69.35</td><td>74.72</td></tr>
<tr><td>+ CVs</td><td>7B</td><td>80.06 (↑3.86)</td><td>70.30 (↑2.47)</td><td>80.55 (↑3.28)</td><td>78.05 (↑3.56)</td><td>68.43 (↑2.64)</td><td>83.76 (↑4.74)</td><td>81.17 (↑3.45)</td><td>72.71 (↑3.36)</td><td>78.14 (↑3.42)</td></tr>
<tr><td>+ SV</td><td>7B</td><td>79.14 (↓0.92)</td><td>71.54 (↑1.24)</td><td>79.27 (↓1.28)</td><td>77.16 (↓0.89)</td><td>69.72 (↑1.29)</td><td>83.14 (↓0.62)</td><td>80.65 (↓0.52)</td><td>73.96 (↑1.25)</td><td>78.88 (↑0.74)</td></tr>
<tr><td rowspan="3">Text-Image</td><td>Qwen2.5-7B</td><td>7B</td><td>76.20</td><td>67.83</td><td>77.27</td><td>74.49</td><td>65.79</td><td>79.02</td><td>77.72</td><td>69.35</td><td>74.72</td></tr>
<tr><td>+ CVs</td><td>7B</td><td>81.88 (↑5.68)</td><td>73.00 (↑5.17)</td><td>84.00 (↑6.73)</td><td>80.55 (↑6.06)</td><td>70.05 (↑4.26)</td><td>87.11 (↑8.09)</td><td>82.01 (↑4.29)</td><td>77.98 (↑8.63)</td><td>80.57 (↑5.85)</td></tr>
<tr><td>+ SV</td><td>7B</td><td>84.15 (↑2.27)</td><td>75.14 (↑2.14)</td><td>87.64 (↑3.64)</td><td>82.99 (↑2.44)</td><td>73.18 (↑3.13)</td><td>89.69 (↑2.58)</td><td>84.40 (↑2.39)</td><td>80.95 (↑2.97)</td><td>83.16 (↑2.59)</td></tr>
</table>
", + "image_path": "4c07c1b06d4321725777668e77059d3bd720c3641b208a370e2a8d113fdb9bbe.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 58, + 209, + 297, + 282 + ], + "blocks": [ + { + "bbox": [ + 55, + 166, + 555, + 201 + ], + "lines": [ + { + "bbox": [ + 55, + 166, + 555, + 201 + ], + "spans": [ + { + "bbox": [ + 55, + 166, + 555, + 201 + ], + "type": "text", + "content": "Table 4. Ablation study on ScienceQA benchmark (CVs: CoE-based Vision-Language Models; SV: Similarly Verification). Performance metrics include: NAT (natural science), SOC (social science), LAN (language), TXT (text context), IMG (image context), NO (no context), G1-6 (grades 1-6), G7-12 (grades 7-12)." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 58, + 209, + 297, + 282 + ], + "lines": [ + { + "bbox": [ + 58, + 209, + 297, + 282 + ], + "spans": [ + { + "bbox": [ + 58, + 209, + 297, + 282 + ], + "type": "table", + "html": "
<table>
<tr><td>Type</td><td>Method</td><td>Task 1 (%)</td><td>Task 2 (%)</td><td>Task 2-Merged (%)</td></tr>
<tr><td rowspan="3">Image-Only</td><td>Qwen2.5-7B</td><td>65.04</td><td>44.52</td><td>45.33</td></tr>
<tr><td>+ CVs</td><td>68.11 (↑3.07)</td><td>47.00 (↑2.48)</td><td>46.95 (↑1.62)</td></tr>
<tr><td>+ SV</td><td>69.52 (↑1.41)</td><td>49.54 (↑2.54)</td><td>49.07 (↑2.12)</td></tr>
<tr><td rowspan="3">Text-Image</td><td>Qwen2.5-7B</td><td>65.04</td><td>44.52</td><td>45.33</td></tr>
<tr><td>+ CVs</td><td>68.43 (↑3.39)</td><td>48.61 (↑4.09)</td><td>48.97 (↑3.64)</td></tr>
<tr><td>+ SV</td><td>68.90 (↑0.47)</td><td>50.02 (↑1.41)</td><td>50.69 (↑1.72)</td></tr>
</table>
", + "image_path": "a0c8d9c34fe5e9db284faa164d7f4985af9b1ae1e7b21f3d54728b3d6c9b0494.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 68, + 297, + 282, + 308 + ], + "lines": [ + { + "bbox": [ + 68, + 297, + 282, + 308 + ], + "spans": [ + { + "bbox": [ + 68, + 297, + 282, + 308 + ], + "type": "text", + "content": "Table 5. Ablation study on CrisisMMD with Qwen2.5-7B." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 55, + 320, + 149, + 334 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 320, + 149, + 334 + ], + "spans": [ + { + "bbox": [ + 55, + 320, + 149, + 334 + ], + "type": "text", + "content": "4.3. Ablation Study" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 338, + 296, + 615 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 338, + 296, + 615 + ], + "spans": [ + { + "bbox": [ + 55, + 338, + 296, + 615 + ], + "type": "text", + "content": "Our ablation studies on CrisisMMD and ScienceQA demonstrate the specific roles of VaLiK's components. As shown in Table 4 and Table 5, the CVs (CoE-based VLM) module improves accuracy across all settings, with average gains of " + }, + { + "bbox": [ + 55, + 338, + 296, + 615 + ], + "type": "inline_equation", + "content": "+3.05\\%" + }, + { + "bbox": [ + 55, + 338, + 296, + 615 + ], + "type": "text", + "content": " on CrisisMMD and " + }, + { + "bbox": [ + 55, + 338, + 296, + 615 + ], + "type": "inline_equation", + "content": "+4.63\\%" + }, + { + "bbox": [ + 55, + 338, + 296, + 615 + ], + "type": "text", + "content": " on ScienceQA tasks, validating visual descriptions enhance reasoning. However, the SV (Similarly Verification) module exhibits dual effects: it significantly improves CrisisMMD metrics by pruning redundant textual descriptions, yet slightly degrades ScienceQA's image-only natural science reasoning. We hypothesize this discrepancy arises from dataset characteristics: CrisisMMD's generated captions contain substantially more redundant content, whereas ScienceQA's simpler visual scenes yield shorter descriptions. Pruning these shorter descriptions risks over-removal of critical semantics. Furthermore, different types of KGs influence the effectiveness of the components: CVs achieve greater gains in CrisisMMD's text-image fusion as original text provides complementary context, while SV shows reduced effectiveness, likely due to occasional over-pruning of cross-modal linkages. Nevertheless, both modules collectively enhance performance across configurations, demonstrating their synergistic yet context-sensitive nature." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 624, + 158, + 636 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 624, + 158, + 636 + ], + "spans": [ + { + "bbox": [ + 55, + 624, + 158, + 636 + ], + "type": "text", + "content": "4.4. Further Analysis" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 641, + 296, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 641, + 296, + 715 + ], + "spans": [ + { + "bbox": [ + 55, + 641, + 296, + 715 + ], + "type": "text", + "content": "Impact of VLM Quantity and Types. We evaluate the impact of varying quantities and types of VLMs on the CVs module. Our experiments reveal that Qwen2-VL generates the most visual descriptions, followed by LLaVA, while BLIP-2 produces the fewest. 
However, BLIP-2 demonstrates superior capability in extracting critical information" + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 316, + 209, + 435, + 297 + ], + "blocks": [ + { + "bbox": [ + 316, + 209, + 435, + 297 + ], + "lines": [ + { + "bbox": [ + 316, + 209, + 435, + 297 + ], + "spans": [ + { + "bbox": [ + 316, + 209, + 435, + 297 + ], + "type": "image", + "image_path": "05342ce1e79f74e662270b7c353d9bd93d8429578bb671ee1bbfba5e163509a6.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 325, + 306, + 542, + 319 + ], + "lines": [ + { + "bbox": [ + 325, + 306, + 542, + 319 + ], + "spans": [ + { + "bbox": [ + 325, + 306, + 542, + 319 + ], + "type": "text", + "content": "Figure 4. Impact analysis of VLM quantity on CrisisMMD." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 435, + 209, + 553, + 297 + ], + "blocks": [ + { + "bbox": [ + 435, + 209, + 553, + 297 + ], + "lines": [ + { + "bbox": [ + 435, + 209, + 553, + 297 + ], + "spans": [ + { + "bbox": [ + 435, + 209, + 553, + 297 + ], + "type": "image", + "image_path": "3790fc9cc3c4a05154d2d0fc160ef6d6d27e8609eb80567e719a7f54aa2e6a58.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 327, + 555, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 327, + 555, + 422 + ], + "spans": [ + { + "bbox": [ + 313, + 327, + 555, + 422 + ], + "type": "text", + "content": "and identifying key entity relationships within images. We therefore adopt BLIP-2 as the primary model, with LLaVA or Qwen2-VL serving as secondary/tertiary components. Adding more VLMs yields diminishing returns, due to limited entities in current images, though we hypothesize their benefits would increase for complex visual scenes with richer semantic content. This phenomenon is empirically validated by our quantitative results in Figure 4." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 425, + 555, + 473 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 425, + 555, + 473 + ], + "spans": [ + { + "bbox": [ + 313, + 425, + 555, + 473 + ], + "type": "text", + "content": "Computational Costs. Due to space limitations, we provide an overview of VaLiK's computational costs in Appendix F. Our method is significantly more cost-effective than manual annotation or LLM fine-tuning." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 488, + 388, + 501 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 488, + 388, + 501 + ], + "spans": [ + { + "bbox": [ + 313, + 488, + 388, + 501 + ], + "type": "text", + "content": "5. Conclusion" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 510, + 556, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 510, + 556, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 510, + 556, + 715 + ], + "type": "text", + "content": "Multimodal reasoning in LLMs is constrained by incomplete knowledge and hallucination artifacts, limitations that persist because textual KGs cannot bridge visual-textual semantics due to their modality isolation. To bridge this gap, we propose VaLiK, a framework for constructing MMKGs through vision-language alignment, eliminating dependency on manual annotations while resolving visual-textual semantic inconsistencies. 
By integrating a cascade of pretrained VLMs and cross-modal verification, VaLiK converts images into structured knowledge while filtering noise. The resulting graphs enhance LLMs' reasoning with minimal storage overhead. Experiments on multimodal reasoning benchmarks show SOTA performance. VaLiK's modular design supports adaptability across domains, offering a scalable solution for autonomous knowledge synthesis. This work advances multimodal AI systems by enabling efficient integration of visual and textual data." + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 72, + 165, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 72, + 165, + 85 + ], + "spans": [ + { + "bbox": [ + 56, + 72, + 165, + 85 + ], + "type": "text", + "content": "6. Acknowledgments" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 91, + 296, + 152 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 91, + 296, + 152 + ], + "spans": [ + { + "bbox": [ + 55, + 91, + 296, + 152 + ], + "type": "text", + "content": "The research was supported by Shanghai Artificial Intelligence Laboratory, the National Key R&D Program of China (Grant No. 2022ZD0160201) and the Science and Technology Commission of Shanghai Municipality (Grant No. 22DZ1100102)." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 164, + 115, + 177 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 164, + 115, + 177 + ], + "spans": [ + { + "bbox": [ + 56, + 164, + 115, + 177 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 61, + 184, + 296, + 713 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 61, + 184, + 296, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 184, + 296, + 239 + ], + "spans": [ + { + "bbox": [ + 61, + 184, + 296, + 239 + ], + "type": "text", + "content": "[1] Mahdi Abavisani, Liwei Wu, Shengli Hu, Joel Tetreault, and Alejandro Jaimes. Multimodal categorization of crisis events in social media. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 5" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 61, + 240, + 296, + 295 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 240, + 296, + 295 + ], + "spans": [ + { + "bbox": [ + 61, + 240, + 296, + 295 + ], + "type": "text", + "content": "[2] Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. Gpt-4 technical report. arXiv preprint arXiv:2303.08774, 2023. 1, 5" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 61, + 297, + 296, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 297, + 296, + 342 + ], + "spans": [ + { + "bbox": [ + 61, + 297, + 296, + 342 + ], + "type": "text", + "content": "[3] Firoj Alam, Ferda Ofli, and Muhammad Imran. Crisismmd: Multimodal twitter datasets from natural disasters. 
Proceedings of the International AAAI Conference on Web and Social Media, 12(1), 2018. 2, 5" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 61, + 343, + 296, + 463 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 343, + 296, + 463 + ], + "spans": [ + { + "bbox": [ + 61, + 343, + 296, + 463 + ], + "type": "text", + "content": "[4] Jean-Baptiste Alayrac, Jeff Donahue, Pauline Luc, Antoine Miech, Iain Barr, Yana Hasson, Karel Lenc, Arthur Mensch, Katherine Millican, Malcolm Reynolds, Roman Ring, Eliza Rutherford, Serkan Cabi, Tengda Han, Zhitao Gong, Sina Samangooei, Marianne Monteiro, Jacob L Menick, Sebastian Borgeaud, Andy Brock, Aida Nematzadeh, Sahand Sharifzadeh, Mikol aj Binkowski, Ricardo Barreira, Oriol Vinyals, Andrew Zisserman, and Karén Simonyan. Flamingo: a visual language model for few-shot learning. In Advances in Neural Information Processing Systems, pages 23716-23736. Curran Associates, Inc., 2022. 3, 4" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 62, + 465, + 296, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 465, + 296, + 498 + ], + "spans": [ + { + "bbox": [ + 62, + 465, + 296, + 498 + ], + "type": "text", + "content": "[5] Razvan Azamfirei, Sapna R Kudchadkar, and James Fackler. Large language models and the perils of their hallucinations. Critical Care, 27(1):120, 2023. 1" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 62, + 500, + 296, + 554 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 500, + 296, + 554 + ], + "spans": [ + { + "bbox": [ + 62, + 500, + 296, + 554 + ], + "type": "text", + "content": "[6] Jinheon Baek, Alham Fikri Aji, and Amir Saffari. Knowledge-augmented language model prompting for zero-shot knowledge graph question answering. In Proceedings of the Annual Meeting of the Association for Computational Linguistics (ACL), 2023. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 62, + 556, + 296, + 600 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 556, + 296, + 600 + ], + "spans": [ + { + "bbox": [ + 62, + 556, + 296, + 600 + ], + "type": "text", + "content": "[7] Tadas Baltrusaitis, Chaitanya Ahuja, and Louis-Philippe Morency. Multimodal machine learning: A survey and taxonomy. IEEE Transactions on Pattern Analysis and Machine Intelligence, 41(2):423-443, 2019. 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 62, + 602, + 296, + 668 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 602, + 296, + 668 + ], + "spans": [ + { + "bbox": [ + 62, + 602, + 296, + 668 + ], + "type": "text", + "content": "[8] Dawei Chen, Zhixu Li, Binbin Gu, and Zhigang Chen. Multimodal named entity recognition with image attributes and image knowledge. In Database Systems for Advanced Applications: 26th International Conference, DASFAA 2021, Taipei, Taiwan, April 11–14, 2021, Proceedings, Part II 26, pages 186–201. Springer, 2021. 1" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 62, + 670, + 296, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 670, + 296, + 713 + ], + "spans": [ + { + "bbox": [ + 62, + 670, + 296, + 713 + ], + "type": "text", + "content": "[9] Jiawei Chen, Hongyu Lin, Xianpei Han, and Le Sun. Benchmarking large language models in retrieval-augmented generation. Proceedings of the AAAI Conference on Artificial Intelligence, 38(16):17754-17762, 2024. 
1" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 73, + 554, + 713 + ], + "type": "list", + "angle": 0, + "index": 26, + "blocks": [ + { + "bbox": [ + 316, + 73, + 554, + 126 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 73, + 554, + 126 + ], + "spans": [ + { + "bbox": [ + 316, + 73, + 554, + 126 + ], + "type": "text", + "content": "[10] Ting Chen, Simon Kornblith, Kevin Swersky, Mohammad Norouzi, and Geoffrey E Hinton. Big self-supervised models are strong semi-supervised learners. Advances in neural information processing systems (NeurIPS), 33:22243-22255, 2020. 1" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 316, + 129, + 554, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 129, + 554, + 194 + ], + "spans": [ + { + "bbox": [ + 316, + 129, + 554, + 194 + ], + "type": "text", + "content": "[11] Xiang Chen, Ningyu Zhang, Lei Li, Shumin Deng, Chuanqi Tan, Changliang Xu, Fei Huang, Luo Si, and Huajun Chen. Hybrid transformer with multi-level fusion for multimodal knowledge graph completion. In Proceedings of the International Conference on Research and Development in Information Retrieva (SIGIR), pages 904-915, 2022. 2" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 316, + 195, + 554, + 249 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 195, + 554, + 249 + ], + "spans": [ + { + "bbox": [ + 316, + 195, + 554, + 249 + ], + "type": "text", + "content": "[12] Xi Chen, Josip Djolonga, Piotr Padlewski, Basil Mustafa, Soravit Changpinyo, Jialin Wu, Carlos Riquelme Ruiz, Sebastian Goodman, Xiao Wang, Yi Tay, et al. Pali-x: On scaling up a multilingual vision and language model. arXiv preprint arXiv:2305.18565, 2023. 3" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 316, + 251, + 554, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 251, + 554, + 304 + ], + "spans": [ + { + "bbox": [ + 316, + 251, + 554, + 304 + ], + "type": "text", + "content": "[13] Xiang Chen, Jingtian Zhang, Xiaohan Wang, Ningyu Zhang, Tongtong Wu, Yuxiang Wang, Yongheng Wang, and Huajun Chen. Continual multimodal knowledge graph construction. In Proceedings of the Thirty-Third International Joint Conference on Artificial Intelligence, 2024. 3" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 316, + 305, + 554, + 358 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 305, + 554, + 358 + ], + "spans": [ + { + "bbox": [ + 316, + 305, + 554, + 358 + ], + "type": "text", + "content": "[14] Xiaokang Chen, Zhiyu Wu, Xingchao Liu, Zizheng Pan, Wen Liu, Zhenda Xie, Xingkai Yu, and Chong Ruan. Januspro: Unified multimodal understanding and generation with data and model scaling. arXiv preprint arXiv:2501.17811, 2025. 1" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 316, + 360, + 554, + 403 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 360, + 554, + 403 + ], + "spans": [ + { + "bbox": [ + 316, + 360, + 554, + 403 + ], + "type": "text", + "content": "[15] Yong Chen, Xinkai Ge, Shengli Yang, Linmei Hu, Jie Li, and Jinwen Zhang. A survey on multimodal knowledge graphs: Construction, completion and applications. Mathematics, 11 (8), 2023. 
3" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 316, + 404, + 554, + 458 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 404, + 554, + 458 + ], + "spans": [ + { + "bbox": [ + 316, + 404, + 554, + 458 + ], + "type": "text", + "content": "[16] Zhuo Chen, Yichi Zhang, Yin Fang, Yuxia Geng, Lingbing Guo, Xiang Chen, Qian Li, Wen Zhang, Jiaoyan Chen, Yushan Zhu, et al. Knowledge graphs meet multimodal learning: A comprehensive survey. arXiv preprint arXiv:2402.05391, 2024. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 316, + 460, + 554, + 514 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 460, + 554, + 514 + ], + "spans": [ + { + "bbox": [ + 316, + 460, + 554, + 514 + ], + "type": "text", + "content": "[17] Shiyao Cui, Jiangxia Cao, Xin Cong, Jiawei Sheng, Quanggang Li, Tingwen Liu, and Jinqiao Shi. Enhancing multimodal entity and relation extraction with variational information bottleneck. IEEE/ACM Transactions on Audio, Speech, and Language Processing, 32:1274-1285, 2024. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 316, + 514, + 554, + 557 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 514, + 554, + 557 + ], + "spans": [ + { + "bbox": [ + 316, + 514, + 554, + 557 + ], + "type": "text", + "content": "[18] Xiaohui Cui, Xiaolong Qu, Dongmei Li, Yu Yang, Yuxun Li, and Xiaoping Zhang. Mkgcn: Multi-modal knowledge graph convolutional network for music recommender systems. *Electronics*, 12(12), 2023. 3" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 316, + 559, + 554, + 613 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 559, + 554, + 613 + ], + "spans": [ + { + "bbox": [ + 316, + 559, + 554, + 613 + ], + "type": "text", + "content": "[19] Yihe Deng, Chenchen Ye, Zijie Huang, Mingyu Derek Ma, Yiwen Kou, and Wei Wang. Graphvis: Boosting llms with visual knowledge graph integration. In Advances in Neural Information Processing Systems, pages 67511-67534. Curran Associates, Inc., 2024. 6, 7" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 316, + 614, + 554, + 656 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 614, + 554, + 656 + ], + "spans": [ + { + "bbox": [ + 316, + 614, + 554, + 656 + ], + "type": "text", + "content": "[20] Tausif Diwan, G. Anirudh, and Jitendra V. Tembhurne. Object detection using yolo: challenges, architectural successors, datasets and applications. Multimedia Tools Appl., 82 (6):9243-9275, 2022. 3" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 316, + 658, + 554, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 658, + 554, + 689 + ], + "spans": [ + { + "bbox": [ + 316, + 658, + 554, + 689 + ], + "type": "text", + "content": "[21] Alexey Dosovitskiy. An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929, 2020. 
4" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 316, + 691, + 554, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 691, + 554, + 713 + ], + "spans": [ + { + "bbox": [ + 316, + 691, + 554, + 713 + ], + "type": "text", + "content": "[22] Darren Edge, Ha Trinh, Newman Cheng, Joshua Bradley, Alex Chao, Apurva Mody, Steven Truitt, Dasha Metropoli" + } + ] + } + ], + "index": 25 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 309, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 309, + 742 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 309, + 742 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 72, + 295, + 713 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 76, + 72, + 294, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 72, + 294, + 106 + ], + "spans": [ + { + "bbox": [ + 76, + 72, + 294, + 106 + ], + "type": "text", + "content": "tansky, Robert Osazuwa Ness, and Jonathan Larson. From local to global: A graph rag approach to query-focused summarization. arXiv preprint arXiv:2404.16130, 2024. 5" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 106, + 295, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 106, + 295, + 150 + ], + "spans": [ + { + "bbox": [ + 56, + 106, + 295, + 150 + ], + "type": "text", + "content": "[23] Quan Fang, Xiaowei Zhang, Jun Hu, Xian Wu, and Changsheng Xu. Contrastive multi-modal knowledge graph representation learning. IEEE Transactions on Knowledge and Data Engineering, 35(9):8983-8996, 2023. 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 150, + 294, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 150, + 294, + 194 + ], + "spans": [ + { + "bbox": [ + 56, + 150, + 294, + 194 + ], + "type": "text", + "content": "[24] Duoduo Feng, Xiangteng He, and Yuxin Peng. Mkvse: Multimodal knowledge enhanced visual-semantic embedding for image-text retrieval. ACM Trans. Multimedia Comput. Commun. Appl., 19(5), 2023. 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 194, + 294, + 249 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 194, + 294, + 249 + ], + "spans": [ + { + "bbox": [ + 56, + 194, + 294, + 249 + ], + "type": "text", + "content": "[25] Deepanway Ghosal, Navonil Majumder, Roy Ka-Wei Lee, Rada Mihalcea, and Soujanya Poria. Language guided visual question answering: Elevate your multimodal language model using knowledge-enriched prompts. arXiv preprint arXiv:2310.20159, 2023. 6, 7" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 250, + 295, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 250, + 295, + 304 + ], + "spans": [ + { + "bbox": [ + 56, + 250, + 295, + 304 + ], + "type": "text", + "content": "[26] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025. 
1, 6" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 304, + 295, + 360 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 304, + 295, + 360 + ], + "spans": [ + { + "bbox": [ + 56, + 304, + 295, + 360 + ], + "type": "text", + "content": "[27] Yunfei Guo, Fei Yin, Xiao-hui Li, Xudong Yan, Tao Xue, Shuqi Mei, and Cheng-Lin Liu. Visual traffic knowledge graph generation from scene images. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 21604-21613, 2023. 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 360, + 294, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 360, + 294, + 392 + ], + "spans": [ + { + "bbox": [ + 56, + 360, + 294, + 392 + ], + "type": "text", + "content": "[28] ZIRUI GUO, Lianghao Xia, Yanhua Yu, Tu Ao, and Chao Huang. LightRAG: Simple and fast retrieval-augmented generation, 2024. 2, 5, 6" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 393, + 294, + 437 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 393, + 294, + 437 + ], + "spans": [ + { + "bbox": [ + 56, + 393, + 294, + 437 + ], + "type": "text", + "content": "[29] Lavdim Halilaj, Juergen Luettin, Sebastian Monka, Cory Henson, and Stefan Schmid. Knowledge graph-based integration of autonomous driving datasets. International Journal of Semantic Computing, 17(02):249-271, 2023. 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 437, + 294, + 482 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 437, + 294, + 482 + ], + "spans": [ + { + "bbox": [ + 56, + 437, + 294, + 482 + ], + "type": "text", + "content": "[30] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2016. 4" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 482, + 294, + 536 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 482, + 294, + 536 + ], + "spans": [ + { + "bbox": [ + 56, + 482, + 294, + 536 + ], + "type": "text", + "content": "[31] Yang Hu, Guihua Wen, Adriane Chapman, Pei Yang, Mingnan Luo, Yingxue Xu, Dan Dai, and Wendy Hall. Graph-based visual-semantic entanglement network for zero-shot image recognition. IEEE Transactions on Multimedia, 24: 2473-2487, 2022. 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 537, + 294, + 613 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 537, + 294, + 613 + ], + "spans": [ + { + "bbox": [ + 56, + 537, + 294, + 613 + ], + "type": "text", + "content": "[32] Zhiqiang Hu, Lei Wang, Yihuai Lan, Wanyu Xu, Ee-Peng Lim, Lidong Bing, Xing Xu, Soujanya Poria, and Roy Lee. LLM-adapters: An adapter family for parameter-efficient fine-tuning of large language models. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 5254-5276, Singapore, 2023. Association for Computational Linguistics. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 614, + 294, + 658 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 614, + 294, + 658 + ], + "spans": [ + { + "bbox": [ + 56, + 614, + 294, + 658 + ], + "type": "text", + "content": "[33] Aaron Hurst, Adam Lerer, Adam P Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihinda, Alan Hayes, Alec Radford, et al. Gpt-4o system card. arXiv preprint arXiv:2410.21276, 2024. 
1, 3, 6" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 56, + 658, + 294, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 658, + 294, + 713 + ], + "spans": [ + { + "bbox": [ + 56, + 658, + 294, + 713 + ], + "type": "text", + "content": "[34] Justin Johnson, Ranjay Krishna, Michael Stark, Li-Jia Li, David Shamma, Michael Bernstein, and Li Fei-Fei. Image retrieval using scene graphs. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2015. 2" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 72, + 553, + 713 + ], + "type": "list", + "angle": 0, + "index": 25, + "blocks": [ + { + "bbox": [ + 316, + 72, + 553, + 128 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 72, + 553, + 128 + ], + "spans": [ + { + "bbox": [ + 316, + 72, + 553, + 128 + ], + "type": "text", + "content": "[35] Adam Tauman Kalai and Santosh S. Vempala. Calibrated language models must hallucinate. In Proceedings of the 56th Annual ACM Symposium on Theory of Computing, page 160–171, New York, NY, USA, 2024. Association for Computing Machinery. 1" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 316, + 128, + 553, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 128, + 553, + 217 + ], + "spans": [ + { + "bbox": [ + 316, + 128, + 553, + 217 + ], + "type": "text", + "content": "[36] Amar Viswanathan Kannan, Dmitriy Fradkin, Ioannis Akrotirianakis, Tugba Kulahcioglu, Arquimedes Canedo, Aditi Roy, Shih-Yuan Yu, Malawade Arnav, and Mohammad Abdullah Al Faruque. Multimodal knowledge graph for deep learning papers and code. In Proceedings of the 29th ACM International Conference on Information & Knowledge Management, page 3417-3420, New York, NY, USA, 2020. Association for Computing Machinery. 3" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 316, + 217, + 553, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 217, + 553, + 270 + ], + "spans": [ + { + "bbox": [ + 316, + 217, + 553, + 270 + ], + "type": "text", + "content": "[37] Jared Kaplan, Sam McCandlish, Tom Henighan, Tom B Brown, Benjamin Chess, Rewon Child, Scott Gray, Alec Radford, Jeffrey Wu, and Dario Amodei. Scaling laws for neural language models. arXiv preprint arXiv:2001.08361, 2020. 7" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 316, + 270, + 553, + 337 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 270, + 553, + 337 + ], + "spans": [ + { + "bbox": [ + 316, + 270, + 553, + 337 + ], + "type": "text", + "content": "[38] Ranjay Krishna, Yuke Zhu, Oliver Groth, Justin Johnson, Kenji Hata, Joshua Kravitz, Stephanie Chen, Yannis Kalantidis, Li-Jia Li, David A Shamma, et al. Visual genome: Connecting language and vision using crowdsourced dense image annotations. International journal of computer vision, 123:32-73, 2017. 6, 7" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 316, + 337, + 553, + 403 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 337, + 553, + 403 + ], + "spans": [ + { + "bbox": [ + 316, + 337, + 553, + 403 + ], + "type": "text", + "content": "[39] Junlin Lee, Yequan Wang, Jing Li, and Min Zhang. Multimodal reasoning with multimodal knowledge graph. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 10767-10782, Bangkok, Thailand, 2024. Association for Computational Linguistics. 
2, 3, 1" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 316, + 403, + 553, + 459 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 403, + 553, + 459 + ], + "spans": [ + { + "bbox": [ + 316, + 403, + 553, + 459 + ], + "type": "text", + "content": "[40] Junnan Li, Dongxu Li, Caiming Xiong, and Steven Hoi. BLIP: Bootstrapping language-image pre-training for unified vision-language understanding and generation. In Proceedings of the 39th International Conference on Machine Learning, pages 12888-12900. PMLR, 2022. 3" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 316, + 459, + 553, + 514 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 459, + 553, + 514 + ], + "spans": [ + { + "bbox": [ + 316, + 459, + 553, + 514 + ], + "type": "text", + "content": "[41] Junnan Li, Dongxu Li, Silvio Savarese, and Steven C. H. Hoi. BLIP-2: bootstrapping language-image pre-training with frozen image encoders and large language models. In Proceedings of the International Conference on Machine Learning (ICML), pages 19730–19742, 2023. 1, 3, 4, 6, 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 316, + 514, + 553, + 568 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 514, + 553, + 568 + ], + "spans": [ + { + "bbox": [ + 316, + 514, + 553, + 568 + ], + "type": "text", + "content": "[42] Xin Li, Dongze Lian, Zhihe Lu, Jiawang Bai, Zhibo Chen, and Xinchao Wang. Graphadapter: Tuning vision-language models with dual knowledge graph. In Advances in Neural Information Processing Systems, pages 13448-13466. Curran Associates, Inc., 2023. 3" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 316, + 569, + 553, + 613 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 569, + 553, + 613 + ], + "spans": [ + { + "bbox": [ + 316, + 569, + 553, + 613 + ], + "type": "text", + "content": "[43] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. In Advances in Neural Information Processing Systems, pages 34892-34916. Curran Associates, Inc., 2023. 3, 6, 7" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 316, + 614, + 553, + 669 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 614, + 553, + 669 + ], + "spans": [ + { + "bbox": [ + 316, + 614, + 553, + 669 + ], + "type": "text", + "content": "[44] Junming Liu, Yanting Gao, Siyuan Meng, Yifei Sun, Aoqi Wu, Yufei Jin, Yirong Chen, Ding Wang, and Guosun Zeng. Mosaic: Data-free knowledge distillation via mixture-of-experts for heterogeneous distributed environments. arXiv preprint arXiv:2505.19699, 2025. 1" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 316, + 669, + 553, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 669, + 553, + 713 + ], + "spans": [ + { + "bbox": [ + 316, + 669, + 553, + 713 + ], + "type": "text", + "content": "[45] Junming Liu, Guosun Zeng, Ding Wang, Yanting Gao, and Yufei Jin. Fedrecon: Missing modality reconstruction in distributed heterogeneous environments. 
arXiv preprint arXiv:2504.09941, 2025.3" + } + ] + } + ], + "index": 24 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 733, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 733, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 733, + 311, + 742 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 26 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 72, + 294, + 713 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 56, + 72, + 294, + 137 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 72, + 294, + 137 + ], + "spans": [ + { + "bbox": [ + 56, + 72, + 294, + 137 + ], + "type": "text", + "content": "[46] Ye Liu, Hui Li, Alberto Garcia-Duran, Mathias Niepert, Daniel Onoro-Rubio, and David S Rosenblum. Mmkg: multi-modal knowledge graphs. In The Semantic Web: 16th International Conference, ESWC 2019, Portoroz, Slovenia, June 2–6, 2019, Proceedings 16, pages 459–474. Springer, 2019. 2, 6, 7" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 139, + 294, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 139, + 294, + 194 + ], + "spans": [ + { + "bbox": [ + 56, + 139, + 294, + 194 + ], + "type": "text", + "content": "[47] Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin transformer: Hierarchical vision transformer using shifted windows. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 10012-10022, 2021. 4" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 195, + 294, + 259 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 195, + 294, + 259 + ], + "spans": [ + { + "bbox": [ + 56, + 195, + 294, + 259 + ], + "type": "text", + "content": "[48] Pan Lu, Swaroop Mishra, Tanglin Xia, Liang Qiu, Kai-Wei Chang, Song-Chun Zhu, Oyvind Tafjord, Peter Clark, and Ashwin Kalyan. Learn to explain: Multimodal reasoning via thought chains for science question answering. In Advances in Neural Information Processing Systems, pages 2507–2521. Curran Associates, Inc., 2022. 2, 5, 6, 7" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 261, + 294, + 325 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 261, + 294, + 325 + ], + "spans": [ + { + "bbox": [ + 56, + 261, + 294, + 325 + ], + "type": "text", + "content": "[49] Pan Lu, Baolin Peng, Hao Cheng, Michel Galley, Kai-Wei Chang, Ying Nian Wu, Song-Chun Zhu, and Jianfeng Gao. Chameleon: Plug-and-play compositional reasoning with large language models. In Advances in Neural Information Processing Systems, pages 43447-43478. Curran Associates, Inc., 2023. 6, 7" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 327, + 294, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 327, + 294, + 380 + ], + "spans": [ + { + "bbox": [ + 56, + 327, + 294, + 380 + ], + "type": "text", + "content": "[50] Gen Luo, Yiyi Zhou, Tianhe Ren, Shengxin Chen, Xiaoshuai Sun, and Rongrong Ji. Cheap and quick: Efficient vision-language instruction tuning for large language models. In Advances in Neural Information Processing Systems, pages 29615-29627. Curran Associates, Inc., 2023. 
6" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 382, + 294, + 435 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 382, + 294, + 435 + ], + "spans": [ + { + "bbox": [ + 56, + 382, + 294, + 435 + ], + "type": "text", + "content": "[51] Kenneth Marino, Mohammad Rastegari, Ali Farhadi, and Roozbeh Mottaghi. Ok-vqa: A visual question answering benchmark requiring external knowledge. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 437, + 294, + 480 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 437, + 294, + 480 + ], + "spans": [ + { + "bbox": [ + 56, + 437, + 294, + 480 + ], + "type": "text", + "content": "[52] Runqing Miao, Qingxuan Jia, Fuchun Sun, Gang Chen, Haiming Huang, and Shengyi Miao. Semantic representation of robot manipulation with knowledge graph. Entropy, 25(4), 2023. 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 481, + 294, + 536 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 481, + 294, + 536 + ], + "spans": [ + { + "bbox": [ + 56, + 481, + 294, + 536 + ], + "type": "text", + "content": "[53] Chancharik Mitra, Brandon Huang, Trevor Darrell, and Roei Herzig. Compositional chain-of-thought prompting for large multimodal models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 14420-14431, 2024. 6" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 537, + 294, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 537, + 294, + 578 + ], + "spans": [ + { + "bbox": [ + 56, + 537, + 294, + 578 + ], + "type": "text", + "content": "[54] Aske Plaat, Annie Wong, Suzan Verberne, Joost Broekens, Niki van Stein, and Thomas Back. Reasoning with large language models, a survey. arXiv preprint arXiv:2407.11511, 2024. 1" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 580, + 294, + 645 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 580, + 294, + 645 + ], + "spans": [ + { + "bbox": [ + 56, + 580, + 294, + 645 + ], + "type": "text", + "content": "[55] Bryan A. Plummer, Liwei Wang, Chris M. Cervantes, Juan C. Caicedo, Julia Hockenmaier, and Svetlana Lazebnik. Flickr30k entities: Collecting region-to-phrase correspondences for richer image-to-sentence models. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2015. 2, 1" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 647, + 294, + 690 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 647, + 294, + 690 + ], + "spans": [ + { + "bbox": [ + 56, + 647, + 294, + 690 + ], + "type": "text", + "content": "[56] Shengsheng Qian, Jun Hu, Quan Fang, and Changsheng Xu. Knowledge-aware multi-modal adaptive graph convolutional networks for fake news detection. ACM Trans. Multimedia Comput. Commun. Appl., 17(3), 2021. 
3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 691, + 294, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 691, + 294, + 713 + ], + "spans": [ + { + "bbox": [ + 56, + 691, + 294, + 713 + ], + "type": "text", + "content": "[57] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry," + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 72, + 553, + 713 + ], + "type": "list", + "angle": 0, + "index": 24, + "blocks": [ + { + "bbox": [ + 333, + 72, + 553, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 333, + 72, + 553, + 127 + ], + "spans": [ + { + "bbox": [ + 333, + 72, + 553, + 127 + ], + "type": "text", + "content": "Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision. In Proceedings of the 38th International Conference on Machine Learning, pages 8748-8763. PMLR, 2021. 3, 6" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 316, + 129, + 553, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 129, + 553, + 182 + ], + "spans": [ + { + "bbox": [ + 316, + 129, + 553, + 182 + ], + "type": "text", + "content": "[58] Brian Reily, Christopher Reardon, and Hao Zhang. Representing multi-robot structure through multimodal graph embedding for the selection of robot teams. In 2020 IEEE International Conference on Robotics and Automation (ICRA), pages 5576–5582, 2020. 3" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 316, + 184, + 553, + 237 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 184, + 553, + 237 + ], + "spans": [ + { + "bbox": [ + 316, + 184, + 553, + 237 + ], + "type": "text", + "content": "[59] Joshua Robinson, Christopher Michael Ryting, and David Wingate. Leveraging large language models for multiple choice question answering. In Proceedings of the International Conference on Learning Representations (ICLR), 2023. 1, 4" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 316, + 239, + 553, + 316 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 239, + 553, + 316 + ], + "spans": [ + { + "bbox": [ + 316, + 239, + 553, + 316 + ], + "type": "text", + "content": "[60] Hrituraj Singh, Anshul Nasery, Denil Mehta, Aishwarya Agarwal, Jatin Lamba, and Balaji Vasan Srinivasan. MI-MOQA: Multimodal input multimodal output question answering. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 5317-5332, Online, 2021. Association for Computational Linguistics. 3" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 316, + 316, + 553, + 369 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 316, + 553, + 369 + ], + "spans": [ + { + "bbox": [ + 316, + 316, + 553, + 369 + ], + "type": "text", + "content": "[61] Yaoxian Song, Penglei Sun, Haoyu Liu, Zhixu Li, Wei Song, Yanghua Xiao, and Xiaofang Zhou. Scene-driven multimodal knowledge graph construction for embodied ai. IEEE Transactions on Knowledge and Data Engineering, 36(11): 6962-6976, 2024. 
2, 3" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 316, + 371, + 553, + 448 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 371, + 553, + 448 + ], + "spans": [ + { + "bbox": [ + 316, + 371, + 553, + 448 + ], + "type": "text", + "content": "[62] Rui Sun, Xuezhi Cao, Yan Zhao, Junchen Wan, Kun Zhou, Fuzheng Zhang, Zhongyuan Wang, and Kai Zheng. Multimodal knowledge graphs for recommender systems. In Proceedings of the 29th ACM International Conference on Information & Knowledge Management, page 1405-1414, New York, NY, USA, 2020. Association for Computing Machinery. 3" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 316, + 449, + 553, + 502 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 449, + 553, + 502 + ], + "spans": [ + { + "bbox": [ + 316, + 449, + 553, + 502 + ], + "type": "text", + "content": "[63] Yu Sun, Shuohuan Wang, Shikun Feng, Siyu Ding, Chao Pang, Junyuan Shang, Jiaxiang Liu, Xuyi Chen, Yanbin Zhao, Yuxiang Lu, et al. Ernie 3.0: Large-scale knowledge enhanced pre-training for language understanding and generation. arXiv preprint arXiv:2107.02137, 2021. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 316, + 504, + 553, + 557 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 504, + 553, + 557 + ], + "spans": [ + { + "bbox": [ + 316, + 504, + 553, + 557 + ], + "type": "text", + "content": "[64] Gemini Team, Rohan Anil, Sebastian Borgeaud, Jean-Baptiste Alayrac, Jiahui Yu, Radu Soricut, Johan Schalkwyk, Andrew M Dai, Anja Hauth, Katie Millican, et al. Gemini: a family of highly capable multimodal models. arXiv preprint arXiv:2312.11805, 2023. 3" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 316, + 559, + 553, + 635 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 559, + 553, + 635 + ], + "spans": [ + { + "bbox": [ + 316, + 559, + 553, + 635 + ], + "type": "text", + "content": "[65] Shengbang Tong, Ellis L Brown II, Penghao Wu, Sanghyun Woo, ADITHYA JAIRAM IYER, Sai Charitha Akula, Shusheng Yang, Jihan Yang, Manoj Middepogu, Ziteng Wang, Xichen Pan, Rob Fergus, Yann LeCun, and Saining Xie. Cambrian-1: A fully open, vision-centric exploration of multimodal LLMs. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024. 1" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 316, + 636, + 553, + 690 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 636, + 553, + 690 + ], + "spans": [ + { + "bbox": [ + 316, + 636, + 553, + 690 + ], + "type": "text", + "content": "[66] Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, et al. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288, 2023. 
1, 5" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 316, + 691, + 553, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 691, + 553, + 713 + ], + "spans": [ + { + "bbox": [ + 316, + 691, + 553, + 713 + ], + "type": "text", + "content": "[67] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia" + } + ] + } + ], + "index": 23 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 310, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 310, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 310, + 742 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 72, + 295, + 714 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 76, + 72, + 294, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 72, + 294, + 105 + ], + "spans": [ + { + "bbox": [ + 76, + 72, + 294, + 105 + ], + "type": "text", + "content": "Polosukhin. Attention is all you need. In Advances in Neural Information Processing Systems. Curran Associates, Inc., 2017. 4" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 107, + 295, + 151 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 107, + 295, + 151 + ], + "spans": [ + { + "bbox": [ + 56, + 107, + 295, + 151 + ], + "type": "text", + "content": "[68] Peng Wang, Qi Wu, Chunhua Shen, Anthony Dick, and Anton van den Hengel. Fvqa: Fact-based visual question answering. IEEE Transactions on Pattern Analysis and Machine Intelligence, 40(10):2413-2427, 2018. 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 152, + 294, + 207 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 152, + 294, + 207 + ], + "spans": [ + { + "bbox": [ + 56, + 152, + 294, + 207 + ], + "type": "text", + "content": "[69] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, et al. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191, 2024. 3, 6" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 209, + 294, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 209, + 294, + 274 + ], + "spans": [ + { + "bbox": [ + 56, + 209, + 294, + 274 + ], + "type": "text", + "content": "[70] Xin Wang, Benyuan Meng, Hong Chen, Yuan Meng, Ke Lv, and Wenwu Zhu. Tiva-kg: A multimodal knowledge graph with text, image, video and audio. In Proceedings of the 31st ACM International Conference on Multimedia, page 2391-2399, New York, NY, USA, 2023. Association for Computing Machinery. 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 276, + 294, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 276, + 294, + 319 + ], + "spans": [ + { + "bbox": [ + 56, + 276, + 294, + 319 + ], + "type": "text", + "content": "[71] Yuequn Wang, Liyan Dong, Hao Zhang, Xintao Ma, Yongli Li, and Minghui Sun. An enhanced multi-modal recommendation based on alternate training with knowledge graph representation. IEEE Access, 8:213012-213026, 2020. 
3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 321, + 294, + 387 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 321, + 294, + 387 + ], + "spans": [ + { + "bbox": [ + 56, + 321, + 294, + 387 + ], + "type": "text", + "content": "[72] Tao Wu, Mengze Li, Jingyuan Chen, Wei Ji, Wang Lin, Jinyang Gao, Kun Kuang, Zhou Zhao, and Fei Wu. Semantic alignment for multimodal large language models. In Proceedings of the 32nd ACM International Conference on Multimedia, page 3489-3498, New York, NY, USA, 2024. Association for Computing Machinery. 1" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 388, + 294, + 432 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 388, + 294, + 432 + ], + "spans": [ + { + "bbox": [ + 56, + 388, + 294, + 432 + ], + "type": "text", + "content": "[73] Yike Wu, Nan Hu, Guilin Qi, Sheng Bi, Jie Ren, Anhuan Xie, and Wei Song. Retrieve-rewrite-answer: A kg-to-text enhanced llms framework for knowledge graph question answering. arXiv preprint arXiv:2309.11206, 2023. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 434, + 294, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 434, + 294, + 498 + ], + "spans": [ + { + "bbox": [ + 56, + 434, + 294, + 498 + ], + "type": "text", + "content": "[74] Ziyang Xiao, Dongxiang Zhang, Yangjun Wu, Lilin Xu, Yuan Jessica Wang, Xiongwei Han, Xiaojin Fu, Tao Zhong, Jia Zeng, Mingli Song, and Gang Chen. Chain-of-experts: When LLMs meet complex operations research problems. In The Twelfth International Conference on Learning Representations, 2024. 2, 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 501, + 295, + 589 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 501, + 295, + 589 + ], + "spans": [ + { + "bbox": [ + 56, + 501, + 295, + 589 + ], + "type": "text", + "content": "[75] Dexuan Xu, Yanyuan Chen, Jieyi Wang, Yue Huang, Hanpin Wang, Zhi Jin, Hongxing Wang, Weihua Yue, Jing He, Hang Li, and Yu Huang. MLeVLM: Improve multi-level progressive capabilities based on multimodal large language model for medical visual question answering. In Findings of the Association for Computational Linguistics: ACL 2024, pages 4977-4997, Bangkok, Thailand, 2024. Association for Computational Linguistics. 1" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 590, + 295, + 633 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 590, + 295, + 633 + ], + "spans": [ + { + "bbox": [ + 56, + 590, + 295, + 633 + ], + "type": "text", + "content": "[76] Peng Xu, Xiatian Zhu, and David A. Clifton. Multimodal learning with transformers: A survey. IEEE Transactions on Pattern Analysis and Machine Intelligence, 45(10):12113-12132, 2023. 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 635, + 295, + 678 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 635, + 295, + 678 + ], + "spans": [ + { + "bbox": [ + 56, + 635, + 295, + 678 + ], + "type": "text", + "content": "[77] An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, et al. Qwen2. 5 technical report. arXiv preprint arXiv:2412.15115, 2024. 
6" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 680, + 295, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 680, + 295, + 714 + ], + "spans": [ + { + "bbox": [ + 56, + 680, + 295, + 714 + ], + "type": "text", + "content": "[78] Xiaocui Yang, Wenfang Wu, Shi Feng, Ming Wang, Daling Wang, Yang Li, Qi Sun, Yifei Zhang, Xiaoming Fu, and Soujanya Poria. Mm-bigbench: Evaluating multimodal models" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 73, + 553, + 714 + ], + "type": "list", + "angle": 0, + "index": 26, + "blocks": [ + { + "bbox": [ + 333, + 73, + 553, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 333, + 73, + 553, + 95 + ], + "spans": [ + { + "bbox": [ + 333, + 73, + 553, + 95 + ], + "type": "text", + "content": "on multimodal content comprehension tasks. arXiv preprint arXiv:2310.09036, 2023. 7" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 316, + 96, + 553, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 96, + 553, + 150 + ], + "spans": [ + { + "bbox": [ + 316, + 96, + 553, + 150 + ], + "type": "text", + "content": "[79] Qinghao Ye, Haiyang Xu, Guohai Xu, Jiabo Ye, Ming Yan, Yiyang Zhou, Junyang Wang, Anwen Hu, Pengcheng Shi, Yaya Shi, et al. mplug-owl: Modularization empowers large language models with multimodality. arXiv preprint arXiv:2304.14178, 2023. 3" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 316, + 152, + 553, + 186 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 152, + 553, + 186 + ], + "spans": [ + { + "bbox": [ + 316, + 152, + 553, + 186 + ], + "type": "text", + "content": "[80] Shukang Yin, Chaoyou Fu, Sirui Zhao, Ke Li, Xing Sun, Tong Xu, and Enhong Chen. A survey on multimodal large language models. arXiv preprint arXiv:2306.13549, 2023. 1" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 316, + 186, + 553, + 241 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 186, + 553, + 241 + ], + "spans": [ + { + "bbox": [ + 316, + 186, + 553, + 241 + ], + "type": "text", + "content": "[81] Jingtong Yue, Zhiwei Lin, Xin Lin, Xiaoyu Zhou, Xiangtai Li, Lu Qi, Yongtao Wang, and Ming-Hsuan Yang. RobuR-CDet: Enhancing robustness of radar-camera fusion in bird's eye view for 3d object detection. In The Thirteenth International Conference on Learning Representations, 2025. 3" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 316, + 243, + 553, + 296 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 243, + 553, + 296 + ], + "spans": [ + { + "bbox": [ + 316, + 243, + 553, + 296 + ], + "type": "text", + "content": "[82] Yichi Zhang, Zhuo Chen, Lingbing Guo, Yajing Xu, Binbin Hu, Ziqi Liu, Huajun Chen, and Wen Zhang. Mygo: Discrete modality information as fine-grained tokens for multi-modal knowledge graph completion. CoRR, abs/2404.09468, 2024. 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 316, + 298, + 553, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 298, + 553, + 342 + ], + "spans": [ + { + "bbox": [ + 316, + 298, + 553, + 342 + ], + "type": "text", + "content": "[83] Zhuosheng Zhang, Aston Zhang, Mu Li, hai zhao, George Karypis, and Alex Smola. Multimodal chain-of-thought reasoning in language models. Transactions on Machine Learning Research, 2024. 
3" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 316, + 344, + 553, + 388 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 344, + 553, + 388 + ], + "spans": [ + { + "bbox": [ + 316, + 344, + 553, + 388 + ], + "type": "text", + "content": "[84] Jiabao Zhao, Xin Lin, Jie Zhou, Jing Yang, Liang He, and Zhaohui Yang. Knowledge-based fine-grained classification for few-shot learning. In 2020 IEEE International Conference on Multimedia and Expo (ICME), pages 1-6, 2020. 3" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 316, + 389, + 553, + 455 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 389, + 553, + 455 + ], + "spans": [ + { + "bbox": [ + 316, + 389, + 553, + 455 + ], + "type": "text", + "content": "[85] Changmeng Zheng, Junhao Feng, Ze Fu, Yi Cai, Qing Li, and Tao Wang. Multimodal relation extraction with efficient graph alignment. In Proceedings of the 29th ACM International Conference on Multimedia, page 5298-5306, New York, NY, USA, 2021. Association for Computing Machinery. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 316, + 456, + 553, + 510 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 456, + 553, + 510 + ], + "spans": [ + { + "bbox": [ + 316, + 456, + 553, + 510 + ], + "type": "text", + "content": "[86] Ge Zheng, Bin Yang, Jiajin Tang, Hong-Yu Zhou, and Sibei Yang. Ddcot: Duty-distinct chain-of-thought prompting for multimodal reasoning in language models. In Advances in Neural Information Processing Systems, pages 5168-5191. Curran Associates, Inc., 2023. 3, 6, 7" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 316, + 512, + 553, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 512, + 553, + 567 + ], + "spans": [ + { + "bbox": [ + 316, + 512, + 553, + 567 + ], + "type": "text", + "content": "[87] Juncheng Zheng, Meiyu Liang, Yang Yu, Yawen Li, and Zhe Xue. Knowledge graph enhanced multimodal transformer for image-text retrieval. In 2024 IEEE 40th International Conference on Data Engineering (ICDE), pages 70-82, 2024. 3" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 316, + 568, + 553, + 621 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 568, + 553, + 621 + ], + "spans": [ + { + "bbox": [ + 316, + 568, + 553, + 621 + ], + "type": "text", + "content": "[88] Deyao Zhu, Jun Chen, Kilichbek Haydarov, Xiaogian Shen, Wenxuan Zhang, and Mohamed Elhoseiny. Chatgpt asks, blip-2 answers: Automatic questioning towards enriched visual descriptions. arXiv preprint arXiv:2303.06594, 2023. 4" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 316, + 624, + 553, + 678 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 624, + 553, + 678 + ], + "spans": [ + { + "bbox": [ + 316, + 624, + 553, + 678 + ], + "type": "text", + "content": "[89] Deyao Zhu, Jun Chen, Xiaogian Shen, Xiang Li, and Mohamed Elhoseiny. MiniGPT-4: Enhancing vision-language understanding with advanced large language models. In The Twelfth International Conference on Learning Representations, 2024. 3" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 316, + 680, + 553, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 680, + 553, + 714 + ], + "spans": [ + { + "bbox": [ + 316, + 680, + 553, + 714 + ], + "type": "text", + "content": "[90] Xiangru Zhu, Zhixu Li, Xiaodan Wang, Xueyao Jiang, Penglei Sun, Xuwu Wang, Yanghua Xiao, and Nicholas Jing Yuan. 
Multi-modal knowledge graph construction and ap" + } + ] + } + ], + "index": 25 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 72, + 296, + 129 + ], + "type": "list", + "angle": 0, + "index": 2, + "blocks": [ + { + "bbox": [ + 75, + 72, + 296, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 72, + 296, + 95 + ], + "spans": [ + { + "bbox": [ + 75, + 72, + 296, + 95 + ], + "type": "text", + "content": "plication: A survey. IEEE Transactions on Knowledge and Data Engineering, 36(2):715-735, 2024. 2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 95, + 296, + 129 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 95, + 296, + 129 + ], + "spans": [ + { + "bbox": [ + 56, + 95, + 296, + 129 + ], + "type": "text", + "content": "[91] Zhengxia Zou, Keyan Chen, Zhenwei Shi, Yuhong Guo, and Jieping Ye. Object detection in 20 years: A survey. Proceedings of the IEEE, 111(3):257-276, 2023. 3" + } + ] + } + ], + "index": 1 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 3 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 65, + 68, + 545, + 104 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 68, + 545, + 104 + ], + "spans": [ + { + "bbox": [ + 65, + 68, + 545, + 104 + ], + "type": "text", + "content": "Aligning Vision to Language: Annotation-Free Multimodal Knowledge Graph Construction for Enhanced LLMs Reasoning" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 233, + 112, + 376, + 129 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 233, + 112, + 376, + 129 + ], + "spans": [ + { + "bbox": [ + 233, + 112, + 376, + 129 + ], + "type": "text", + "content": "Supplementary Material" + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 58, + 141, + 293, + 322 + ], + "blocks": [ + { + "bbox": [ + 58, + 141, + 293, + 322 + ], + "lines": [ + { + "bbox": [ + 58, + 141, + 293, + 322 + ], + "spans": [ + { + "bbox": [ + 58, + 141, + 293, + 322 + ], + "type": "image", + "image_path": "a3b9193854505da3f69bfbf1b89c3b890690e66f382f92dbb2246b00c8c73cc4.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 331, + 296, + 376 + ], + "lines": [ + { + "bbox": [ + 55, + 331, + 296, + 376 + ], + "spans": [ + { + "bbox": [ + 55, + 331, + 296, + 376 + ], + "type": "text", + "content": "Figure 5. (a) The limited information contained in text-based KGs leads to inaccurate responses. (b) Leveraging MMKGs enables reasoning with enriched multimodal information to produce the correct answer." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 389, + 296, + 416 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 389, + 296, + 416 + ], + "spans": [ + { + "bbox": [ + 55, + 389, + 296, + 416 + ], + "type": "text", + "content": "A. Cross-Modal Reasoning Failures in Textual KGs" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 426, + 296, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 426, + 296, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 426, + 296, + 713 + ], + "type": "text", + "content": "Multimodal learning, by virtue of its capability to synergistically integrate heterogeneous data modalities, establishes a comprehensive knowledge acquisition paradigm that significantly enhances reasoning robustness [39]. This principle extends to Multimodal Knowledge Graphs (MMKGs), where the semantic symbiosis between visual and textual modalities addresses the critical limitation of modal isolation inherent in conventional text-based KGs. As empirically demonstrated in Figure 5, pure textual KGs often induce hallucinated or incomplete responses due to their inability to resolve visual-textual semantic ambiguities. For instance, when queried about fine-grained visual attributes (e.g., spatial relationships or object properties absent in textual metadata), LLMs grounded solely on textual KG triples frequently generate plausible but factually inconsistent answers, as they lack access to cross-modal referential grounding. In contrast, MMKGs bridge this gap through bidirectional visual-textual entity linking, enabling LLMs to retrieve and reason over fused evidence from both modalities. Our qualitative analysis of the case in Figure 5 reveals that the multimodal reasoning path—leveraging both image-derived entities and textual relationships—is essential for deriving logically coherent and factually accurate conclusions." + } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 315, + 141, + 553, + 259 + ], + "blocks": [ + { + "bbox": [ + 315, + 141, + 553, + 259 + ], + "lines": [ + { + "bbox": [ + 315, + 141, + 553, + 259 + ], + "spans": [ + { + "bbox": [ + 315, + 141, + 553, + 259 + ], + "type": "image", + "image_path": "4482265fe190048fcacd251960a888f5a863aba009211a7e298bd63dc9539739.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 269, + 555, + 290 + ], + "lines": [ + { + "bbox": [ + 313, + 269, + 555, + 290 + ], + "spans": [ + { + "bbox": [ + 313, + 269, + 555, + 290 + ], + "type": "text", + "content": "Figure 6. Three example social media posts with labelled named entities [8]." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 317, + 293, + 559, + 424 + ], + "blocks": [ + { + "bbox": [ + 317, + 293, + 559, + 424 + ], + "lines": [ + { + "bbox": [ + 317, + 293, + 559, + 424 + ], + "spans": [ + { + "bbox": [ + 317, + 293, + 559, + 424 + ], + "type": "table", + "html": "
<table><tr><td>Type</td><td>#Chains</td><td>Mentions/Chain</td><td>Boxes/Chain</td></tr>
<tr><td>people</td><td>59766</td><td>3.17</td><td>1.95</td></tr>
<tr><td>clothing</td><td>42380</td><td>1.76</td><td>1.44</td></tr>
<tr><td>body parts</td><td>12809</td><td>1.50</td><td>1.42</td></tr>
<tr><td>animals</td><td>5086</td><td>3.63</td><td>1.44</td></tr>
<tr><td>vehicles</td><td>5561</td><td>2.77</td><td>1.21</td></tr>
<tr><td>instruments</td><td>1827</td><td>2.85</td><td>1.61</td></tr>
<tr><td>scene</td><td>46919</td><td>2.03</td><td>0.62</td></tr>
<tr><td>other</td><td>82098</td><td>1.94</td><td>1.04</td></tr>
<tr><td>total</td><td>244035</td><td>2.10</td><td>1.13</td></tr></table>
", + "image_path": "70c509381366e918387c50f30aab692b2a319db199d55fff823b7322e85e7c0d.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 432, + 555, + 475 + ], + "lines": [ + { + "bbox": [ + 313, + 432, + 555, + 475 + ], + "spans": [ + { + "bbox": [ + 313, + 432, + 555, + 475 + ], + "type": "text", + "content": "Table 6. Coreference chain statistics of Flickr30K-Entity. The number of mentions per chain indicates how salient an entity is. The number of boxes per chain indicates how many distinct entities it refers to." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 313, + 498, + 553, + 525 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 498, + 553, + 525 + ], + "spans": [ + { + "bbox": [ + 313, + 498, + 553, + 525 + ], + "type": "text", + "content": "B. Case Studies on Manual Annotation Overheads" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 312, + 534, + 555, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 534, + 555, + 713 + ], + "spans": [ + { + "bbox": [ + 312, + 534, + 555, + 713 + ], + "type": "text", + "content": "The development of robust entity extraction models typically hinges on large-scale annotated corpora, yet the generalizability of these models remains intrinsically bounded by the semantic scope and granularity of their training datasets. Widely-adopted benchmarks such as Flickr30K-Entity [55] exemplify this constraint: while serving as de facto standards for evaluating visual-linguistic entity grounding, their construction necessitates labor-intensive manual annotations at scale. As illustrated in Figure 6, even high-quality annotations in such datasets often adopt a minimalist tagging paradigm—identifying only coarse-grained entities while neglecting fine-grained attributes and contextual relationships. This sparsity of semantic enrichment directly propagates to trained models, which consequently fail to capture the compositional semantics necessary for com" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 57, + 70, + 294, + 205 + ], + "blocks": [ + { + "bbox": [ + 57, + 70, + 294, + 205 + ], + "lines": [ + { + "bbox": [ + 57, + 70, + 294, + 205 + ], + "spans": [ + { + "bbox": [ + 57, + 70, + 294, + 205 + ], + "type": "image", + "image_path": "b3d612618801180dba35a284f64b34eea05de762b170c942e03ec9dd5a4b8bdd.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 214, + 295, + 249 + ], + "lines": [ + { + "bbox": [ + 55, + 214, + 295, + 249 + ], + "spans": [ + { + "bbox": [ + 55, + 214, + 295, + 249 + ], + "type": "text", + "content": "Figure 7. An example from the ScienceQA benchmark [48], illustrating multimodal question-answering scenarios that necessitate joint reasoning over textual prompts and visual evidence." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 257, + 160, + 269 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 257, + 160, + 269 + ], + "spans": [ + { + "bbox": [ + 55, + 257, + 160, + 269 + ], + "type": "text", + "content": "plex reasoning scenarios." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 278, + 295, + 306 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 278, + 295, + 306 + ], + "spans": [ + { + "bbox": [ + 55, + 278, + 295, + 306 + ], + "type": "text", + "content": "C. Case Studies on Visual Specificity Deficits in VLM-Generated Captions" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 312, + 296, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 312, + 296, + 529 + ], + "spans": [ + { + "bbox": [ + 55, + 312, + 296, + 529 + ], + "type": "text", + "content": "As exemplified in Figure 7, vision-language models like BLIP-2 [41] tend to produce oversimplified textual descriptions that critically lack actionable visual-semantic signals. The VLM-generated caption (\"A map of the united states with the location of the united states\") merely identifies coarse-grained scene semantics, failing to capture object-level attributes (color coding of regions), spatial relationships (border adjacency between Arizona and Mexico) and compositional context (compass orientation in lower-right corner). In contrast, human annotations (\"This is a map of the United States. The main part of the country is shown in green, with several states labeled. Arizona is in the southwestern part of the US, bordering Mexico. Oklahoma is in the central - southern region. Louisiana is located along the Gulf of Mexico in the southeastern part. West Virginia is in the eastern part of the country. There's also a compass in the bottom - right corner to show directions.\") demonstrate essential characteristics for multimodal reasoning." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 537, + 294, + 552 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 537, + 294, + 552 + ], + "spans": [ + { + "bbox": [ + 55, + 537, + 294, + 552 + ], + "type": "text", + "content": "D. 
Retrieval Strategy in MMKG Construction" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 558, + 295, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 558, + 295, + 582 + ], + "spans": [ + { + "bbox": [ + 55, + 558, + 295, + 582 + ], + "type": "text", + "content": "We adopt retrieval strategies based on the framework provided by LightRAG [28], which supports multiple modes:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 582, + 294, + 653 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 55, + 582, + 264, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 582, + 264, + 594 + ], + "spans": [ + { + "bbox": [ + 55, + 582, + 264, + 594 + ], + "type": "text", + "content": "- local: focuses on context-dependent information;" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 594, + 204, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 594, + 204, + 605 + ], + "spans": [ + { + "bbox": [ + 56, + 594, + 204, + 605 + ], + "type": "text", + "content": "- global: utilizes global knowledge;" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 606, + 280, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 606, + 280, + 617 + ], + "spans": [ + { + "bbox": [ + 56, + 606, + 280, + 617 + ], + "type": "text", + "content": "- hybrid: combines local and global retrieval methods;" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 618, + 294, + 642 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 618, + 294, + 642 + ], + "spans": [ + { + "bbox": [ + 56, + 618, + 294, + 642 + ], + "type": "text", + "content": "- naive: performs basic search without advanced techniques;" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 55, + 642, + 282, + 653 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 642, + 282, + 653 + ], + "spans": [ + { + "bbox": [ + 55, + 642, + 282, + 653 + ], + "type": "text", + "content": "- mix: integrates knowledge graph and vector retrieval;" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 55, + 654, + 295, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 654, + 295, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 654, + 295, + 713 + ], + "type": "text", + "content": "In our implementation, we rely on the hybrid retrieval mode, which balances the precision of local cues with the breadth of global knowledge. This strategy improves the relevance and completeness of retrieved information, which is crucial for high-quality MMKG construction." 
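To make the mode choice above concrete, the following is a minimal sketch of querying a LightRAG store in the hybrid mode the paper adopts. It assumes LightRAG's public `LightRAG`/`QueryParam` interface; the working directory, inserted text, and question are illustrative, and a real setup would also supply LLM and embedding functions.

```python
# Hedged sketch: querying a LightRAG index with the hybrid retrieval mode.
# Exact constructor arguments vary by LightRAG version; paths and strings
# here are placeholders, not the paper's configuration.
from lightrag import LightRAG, QueryParam

rag = LightRAG(working_dir="./mmkg_store")  # hypothetical storage path

# Index a refined image description (optionally concatenated with external knowledge).
rag.insert("A map of the United States; Arizona sits in the southwest, bordering Mexico.")

# mode can be "local", "global", "hybrid", "naive", or "mix"; "hybrid"
# balances context-dependent (local) cues with corpus-level (global) knowledge.
answer = rag.query(
    "Which labeled state borders Mexico?",
    param=QueryParam(mode="hybrid"),
)
print(answer)
```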
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 315, + 72, + 451, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 72, + 451, + 83 + ], + "spans": [ + { + "bbox": [ + 315, + 72, + 451, + 83 + ], + "type": "text", + "content": "Algorithm 1 MMKG Generation" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 314, + 89, + 553, + 114 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 89, + 553, + 114 + ], + "spans": [ + { + "bbox": [ + 314, + 89, + 553, + 114 + ], + "type": "text", + "content": "Require: " + }, + { + "bbox": [ + 314, + 89, + 553, + 114 + ], + "type": "inline_equation", + "content": "\\hat{S}" + }, + { + "bbox": [ + 314, + 89, + 553, + 114 + ], + "type": "text", + "content": " (refined description), " + }, + { + "bbox": [ + 314, + 89, + 553, + 114 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 314, + 89, + 553, + 114 + ], + "type": "text", + "content": " (external knowledge, optional)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 315, + 114, + 481, + 125 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 114, + 481, + 125 + ], + "spans": [ + { + "bbox": [ + 315, + 114, + 481, + 125 + ], + "type": "text", + "content": "Ensure: " + }, + { + "bbox": [ + 315, + 114, + 481, + 125 + ], + "type": "inline_equation", + "content": "\\mathcal{G} = (\\mathcal{E},\\mathcal{R})" + }, + { + "bbox": [ + 315, + 114, + 481, + 125 + ], + "type": "text", + "content": " (knowledge graph)" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 321, + 125, + 553, + 174 + ], + "type": "list", + "angle": 0, + "index": 21, + "blocks": [ + { + "bbox": [ + 321, + 125, + 553, + 137 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 321, + 125, + 553, + 137 + ], + "spans": [ + { + "bbox": [ + 321, + 125, + 553, + 137 + ], + "type": "text", + "content": "1: " + }, + { + "bbox": [ + 321, + 125, + 553, + 137 + ], + "type": "inline_equation", + "content": "\\mathcal{T}\\gets \\hat{S}\\oplus T" + }, + { + "bbox": [ + 321, + 125, + 553, + 137 + ], + "type": "inline_equation", + "content": "\\triangleright" + }, + { + "bbox": [ + 321, + 125, + 553, + 137 + ], + "type": "text", + "content": " Concatenate " + }, + { + "bbox": [ + 321, + 125, + 553, + 137 + ], + "type": "inline_equation", + "content": "\\hat{S}" + }, + { + "bbox": [ + 321, + 125, + 553, + 137 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 321, + 125, + 553, + 137 + ], + "type": "inline_equation", + "content": "T" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 321, + 137, + 553, + 149 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 321, + 137, + 553, + 149 + ], + "spans": [ + { + "bbox": [ + 321, + 137, + 553, + 149 + ], + "type": "text", + "content": "2: " + }, + { + "bbox": [ + 321, + 137, + 553, + 149 + ], + "type": "inline_equation", + "content": "\\mathcal{G} \\leftarrow" + }, + { + "bbox": [ + 321, + 137, + 553, + 149 + ], + "type": "text", + "content": " LightRAG(T) " + }, + { + "bbox": [ + 321, + 137, + 553, + 149 + ], + "type": "inline_equation", + "content": "\\triangleright" + }, + { + "bbox": [ + 321, + 137, + 553, + 149 + ], + "type": "text", + "content": " Generate graph via LightRAG" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 321, + 149, + 553, + 161 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 321, + 149, + 553, + 161 + ], + "spans": [ + { + "bbox": [ + 321, + 149, + 553, + 161 + ], + "type": "text", + "content": "3: " 
+ }, + { + "bbox": [ + 321, + 149, + 553, + 161 + ], + "type": "inline_equation", + "content": "(\\mathcal{E},\\mathcal{R})\\gets f_{\\mathrm{ERE}}(\\mathcal{T})" + }, + { + "bbox": [ + 321, + 149, + 553, + 161 + ], + "type": "text", + "content": " Extract entities and relations" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 321, + 162, + 494, + 174 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 321, + 162, + 494, + 174 + ], + "spans": [ + { + "bbox": [ + 321, + 162, + 494, + 174 + ], + "type": "text", + "content": "4: return " + }, + { + "bbox": [ + 321, + 162, + 494, + 174 + ], + "type": "inline_equation", + "content": "\\mathcal{G} = \\{(h,r,t)\\mid h,t\\in \\mathcal{E},r\\in \\mathcal{R}\\}" + } + ] + } + ], + "index": 20 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 313, + 198, + 555, + 389 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 198, + 555, + 389 + ], + "spans": [ + { + "bbox": [ + 313, + 198, + 555, + 389 + ], + "type": "text", + "content": "LightRAG is an excellent project that effectively supports automatic MMKG construction, and its retrieval design plays a central role in our framework. Specifically, LightRAG introduces keyword-guided text chunking to expand the retrievable context. By leveraging both high-level and low-level keywords in combination with chunk-level vector retrieval, it enables more comprehensive knowledge access. In addition, the choice of the retrieval model is also important. Larger LLMs have slower retrieval speeds but better performance. In this experiment, we used Qwen2.5-7B for retrieval. We also tested the retrieval performance of 32B and 72B models, which showed a " + }, + { + "bbox": [ + 313, + 198, + 555, + 389 + ], + "type": "inline_equation", + "content": "1\\% - 5\\%" + }, + { + "bbox": [ + 313, + 198, + 555, + 389 + ], + "type": "text", + "content": " improvement in performance, but it also significantly increased the graph construction time. Therefore, we finally adopted a lightweight retrieval model. The details of the entire LightRAG are shown in Algorithm 1." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 313, + 399, + 511, + 414 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 399, + 511, + 414 + ], + "spans": [ + { + "bbox": [ + 313, + 399, + 511, + 414 + ], + "type": "text", + "content": "E. Selection of Sensitivity Threshold " + }, + { + "bbox": [ + 313, + 399, + 511, + 414 + ], + "type": "inline_equation", + "content": "\\tau" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 313, + 419, + 554, + 503 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 419, + 554, + 503 + ], + "spans": [ + { + "bbox": [ + 313, + 419, + 554, + 503 + ], + "type": "text", + "content": "We select the sensitivity threshold " + }, + { + "bbox": [ + 313, + 419, + 554, + 503 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 313, + 419, + 554, + 503 + ], + "type": "text", + "content": " empirically based on performance on the validation set. 
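Concretely, an empirical selection like this reduces to a small grid search over candidate thresholds. The sketch below is only an illustration of that procedure; `evaluate_with_tau` is a hypothetical stand-in for the full prune, construct, and score pipeline and is not part of any library.

```python
# Minimal sketch of empirical threshold selection: sweep candidate tau values
# and keep the one with the best validation score. evaluate_with_tau is a
# hypothetical placeholder for pruning + MMKG construction + downstream scoring.
def select_tau(candidates, val_set, evaluate_with_tau):
    scores = {tau: evaluate_with_tau(val_set, tau) for tau in candidates}
    return max(scores, key=scores.get)

# Datasets with richer visual content and longer captions tend to favor a
# lower tau (prune less), so a coarse grid is usually sufficient, e.g.:
# best_tau = select_tau([0.2, 0.25, 0.3, 0.35, 0.4], val_set, evaluate_with_tau)
```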
In practice, " + }, + { + "bbox": [ + 313, + 419, + 554, + 503 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 313, + 419, + 554, + 503 + ], + "type": "text", + "content": " can be approximately determined by observing the token length distribution of captions: datasets with richer visual content and longer captions tend to benefit from a lower " + }, + { + "bbox": [ + 313, + 419, + 554, + 503 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 313, + 419, + 554, + 503 + ], + "type": "text", + "content": ", while simpler datasets can tolerate a higher " + }, + { + "bbox": [ + 313, + 419, + 554, + 503 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 313, + 419, + 554, + 503 + ], + "type": "text", + "content": ". This provides a practical way to adjust " + }, + { + "bbox": [ + 313, + 419, + 554, + 503 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 313, + 419, + 554, + 503 + ], + "type": "text", + "content": " without extensive tuning." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 313, + 503, + 554, + 600 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 503, + 554, + 600 + ], + "spans": [ + { + "bbox": [ + 313, + 503, + 554, + 600 + ], + "type": "text", + "content": "In addition, we notice a key pattern when analyzing the relevance scores across windows. Around certain values of " + }, + { + "bbox": [ + 313, + 503, + 554, + 600 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 313, + 503, + 554, + 600 + ], + "type": "text", + "content": ", the scores tend to cluster tightly on both sides of the threshold. As a result, even a small change in " + }, + { + "bbox": [ + 313, + 503, + 554, + 600 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 313, + 503, + 554, + 600 + ], + "type": "text", + "content": " near these points can lead to a large change in the number of tokens being pruned. This indicates that the pruning process is especially sensitive around those points, and adjusting " + }, + { + "bbox": [ + 313, + 503, + 554, + 600 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 313, + 503, + 554, + 600 + ], + "type": "text", + "content": " even slightly may have a big impact on the final token budget." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 313, + 609, + 504, + 624 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 609, + 504, + 624 + ], + "spans": [ + { + "bbox": [ + 313, + 609, + 504, + 624 + ], + "type": "text", + "content": "F. Construction Cost and Scalability" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 313, + 629, + 554, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 629, + 554, + 714 + ], + "spans": [ + { + "bbox": [ + 313, + 629, + 554, + 714 + ], + "type": "text", + "content": "Construction cost is a complex issue, which we analyze from the perspectives of time and hardware requirements. Time-wise, the main components are CoE and LightRAG. While using APIs can significantly speed up the process, offline deployment and inference are also feasible. 
For example, generating descriptions with Qwen2-VL-7B achieves around 60 tokens per second, processing one image ev" + } + ] + } + ], + "index": 27 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 309, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 309, + 742 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 309, + 742 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 57, + 72, + 294, + 203 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 72, + 294, + 203 + ], + "spans": [ + { + "bbox": [ + 57, + 72, + 294, + 203 + ], + "type": "text", + "content": "ery 4 seconds. Thus, processing 1k images takes approximately 1.21 hours. Constructing a KG with Qwen2.5-7B yields about 196k tokens per hour, leading to a total of 1.33 hours for 1k images. The intermediate pruning step, accelerated by CLIP's fast processing speed, is negligible. Overall, the cost is much lower than manual annotation or fine-tuning LLMs, making the method applicable to largescale datasets. For resource-constrained users, deploying a lightweight VLM with CoE is comparable to or even more efficient than deploying a powerful VLM, further demonstrating the scalability of our approach." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 58, + 213, + 294, + 239 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 213, + 294, + 239 + ], + "spans": [ + { + "bbox": [ + 58, + 213, + 294, + 239 + ], + "type": "text", + "content": "G. Discussion on VLM Usage and Design Flexibility" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 57, + 247, + 294, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 247, + 294, + 365 + ], + "spans": [ + { + "bbox": [ + 57, + 247, + 294, + 365 + ], + "type": "text", + "content": "Our observations on the number and type of VLMs used in CoE are consistent with the original conclusions drawn in the CoE paper [74]. Regardless of the specific VLM architecture, increasing the number of models " + }, + { + "bbox": [ + 57, + 247, + 294, + 365 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 57, + 247, + 294, + 365 + ], + "type": "text", + "content": " consistently improves performance up to a saturation point, after which further scaling yields diminishing returns. Moreover, we find that convergence is achieved more quickly when using lower softmax temperatures or simpler datasets. These factors reduce the ambiguity in model disagreement, allowing consensus to form more rapidly among the ensemble." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 57, + 367, + 294, + 461 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 367, + 294, + 461 + ], + "spans": [ + { + "bbox": [ + 57, + 367, + 294, + 461 + ], + "type": "text", + "content": "Interestingly, our results also show that using a single, strong VLM can achieve performance comparable to a cascade of smaller, lightweight models. This suggests a practical trade-off between model strength and ensemble size—while ensembling helps in reaching consensus across diverse weak learners, a single high-capacity model may suffice in many scenarios, especially when computational resources are limited." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 57, + 463, + 294, + 629 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 463, + 294, + 629 + ], + "spans": [ + { + "bbox": [ + 57, + 463, + 294, + 629 + ], + "type": "text", + "content": "In the original CoE method, the outputs from all VLM experts are first aggregated together, and then a selection process determines which expert descriptions to use. To save time in constructing the MMKGs with LLMs, we instead adopted a sequential strategy where the output of one expert is used as the prompt input for the next. We also evaluated the original aggregation and selection strategy on a smaller-scale dataset and found it to perform well, sometimes even surpassing the sequential approach. This confirms that CoE's original design of aggregating all experts' outputs before selecting which descriptions to use is effective and remains a strong baseline. However, correspondingly, using LLMs to construct MMKGs based on these aggregated descriptions requires significantly more time." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 58, + 631, + 294, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 631, + 294, + 712 + ], + "spans": [ + { + "bbox": [ + 58, + 631, + 294, + 712 + ], + "type": "text", + "content": "Additionally, while we apply pruning only at the final description step, pruning during intermediate steps may also yield good results depending on the dataset and task. There is no fixed rule for when or how to apply pruning, and our framework is designed to be flexible enough to accommodate different strategies. We emphasize that both our CoE framework and the SV step are intended to be adaptable, al" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 317, + 73, + 553, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 73, + 553, + 95 + ], + "spans": [ + { + "bbox": [ + 317, + 73, + 553, + 95 + ], + "type": "text", + "content": "lowing users to experiment freely and select the approach that best suits their needs." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 317, + 97, + 553, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 97, + 553, + 156 + ], + "spans": [ + { + "bbox": [ + 317, + 97, + 553, + 156 + ], + "type": "text", + "content": "There are various VLMs that can be used for pruning. Among them, we recommend CLIP due to its fast inference speed and pruning performance comparable to other VLMs. Given its efficiency and effectiveness, CLIP serves as a practical choice for pruning in many scenarios." 
+ } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 733, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 733, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 733, + 308, + 741 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2503_13xxx/2503.13107/91b69939-46cd-4d75-af9c-6d9f6dccd58b_content_list.json b/data/2025/2503_13xxx/2503.13107/91b69939-46cd-4d75-af9c-6d9f6dccd58b_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..0df42419839dc77c4f007da4b96d6ada1f315d6f --- /dev/null +++ b/data/2025/2503_13xxx/2503.13107/91b69939-46cd-4d75-af9c-6d9f6dccd58b_content_list.json @@ -0,0 +1,2654 @@ +[ + { + "type": "text", + "text": "ClearSight: Visual Signal Enhancement for Object Hallucination Mitigation in Multimodal Large Language Models", + "text_level": 1, + "bbox": [ + 102, + 128, + 893, + 175 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Hao Yin Gunagzong Si Zilei Wang*", + "bbox": [ + 338, + 203, + 663, + 220 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "University of Science and Technology of China", + "bbox": [ + 310, + 222, + 684, + 239 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{yinhnavi, guangzongsi}@mail.ustc.edu.cn, zlwang@ustc.edu.cn", + "bbox": [ + 230, + 241, + 759, + 255 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 246, + 291, + 325, + 306 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Contrastive decoding strategies are widely used to mitigate object hallucinations in multimodal large language models (MLLMs). By reducing over-reliance on language priors, these strategies ensure that generated content remains closely grounded in visual inputs, producing contextually accurate outputs. Since contrastive decoding requires no additional training or external tools, it offers both computational efficiency and versatility, making it highly attractive. However, these methods present two main limitations: (1) bluntly suppressing language priors can compromise coherence and accuracy of generated content, and (2) processing contrastive inputs adds computational load, significantly slowing inference speed. To address these challenges, we propose Visual Amplification Fusion (VAF), a plug-and-play technique that enhances attention to visual signals within the model's middle layers, where modality fusion predominantly occurs. This approach enables more effective capture of visual features, reducing the model's bias toward language modality. Experimental results demonstrate that VAF significantly reduces hallucinations across various MLLMs without affecting inference speed, while maintaining coherence and accuracy in generated outputs. The code is available at https://github.com/ustc-hyin/ClearSight.", + "bbox": [ + 89, + 321, + 483, + 686 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. 
Introduction", + "text_level": 1, + "bbox": [ + 91, + 715, + 220, + 729 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In recent years, MLLMs [8, 27, 33, 35, 54, 55] have achieved remarkable progress in the intersecting fields of computer vision and natural language processing, and have been widely applied in tasks such as image captioning and visual question answering. However, these models often encounter the issue of \"object hallucination\" [15, 29, 32, 37] in practical applications, where the generated textual descriptions do not match the actual objects in the image. This problem highlights an over-reliance on unimodal pri", + "bbox": [ + 89, + 739, + 482, + 876 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ors (especially language priors) [17, 48, 50, 53] during inference, posing potential risks in high-precision applications such as medical diagnosis [18, 46] and autonomous driving [9, 34, 39, 49].", + "bbox": [ + 511, + 292, + 903, + 353 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "To address object hallucination [1, 2, 7, 14], several Contrastive Decoding strategies have been introduced in recent years. Among these, the Visual Contrastive Decoding (VCD) method has shown promise in reducing hallucinations by contrasting output distributions from both original and perturbed visual inputs, thus mitigating the model's excessive reliance on language priors [16, 41]. Notably, contrastive decoding methods do not require additional training or external tools, offering both computational efficiency and versatility, which has garnered them significant attention. However, these methods present two main limitations:", + "bbox": [ + 511, + 354, + 906, + 521 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Limitations of Contrastive Decoding", + "text_level": 1, + "bbox": [ + 540, + 534, + 782, + 549 + ], + "page_idx": 0 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- While reducing over-reliance on language priors, these methods may compromise the coherence and accuracy of generated content.", + "- Contrastive decoding necessitates separate processing of the original and contrastive inputs, which considerably increases inference time." + ], + "bbox": [ + 539, + 561, + 880, + 652 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "To address these shortcomings, we hope to propose a training-free method that can effectively reduces hallucinations without compromising content quality or inference speed. Our saliency analysis of the model's attention maps reveals that biases toward language in generated content do not arise from an overemphasis on language signals but rather from insufficient attention on visual information during modality fusion. Based on this insight, we introduce a novel, plug-and-play technique to mitigate hallucinations: Visual Amplification Fusion (VAF).", + "bbox": [ + 511, + 672, + 906, + 823 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Our analysis indicates that modality fusion in MLLMs primarily occurs within the middle layers. 
VAF specifically amplifies visual signals at these middle layers, enabling the model to capture more distinctive visual features during fusion, which in turn reduces false descriptions in generated", + "bbox": [ + 511, + 824, + 908, + 902 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2503.13107v2 [cs.CV] 27 May 2025", + "bbox": [ + 22, + 255, + 57, + 705 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Corresponding Author", + "bbox": [ + 109, + 887, + 236, + 898 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "text. This technique not only strengthens the model's visual representations but also retains the beneficial influence of language priors, thus preserving content quality. Furthermore, by eliminating the need to process contrastive samples, VAF maintains inference speed.", + "bbox": [ + 89, + 90, + 482, + 167 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Experimental results validate the effectiveness of the VAF method. Across multiple object hallucination benchmarks, VAF demonstrated notable performance gains, with improvements of approximately $3\\%$ on POPE and $7\\%$ on MME. In terms of coherence and accuracy of generated responses, VCD caused a roughly $19\\%$ decrease on NoCaps, while VAF maintained content quality without negative impacts. Additionally, VCD reduced inference speed by $50\\%$ , whereas VAF had virtually no effect on inference speed.", + "bbox": [ + 89, + 167, + 482, + 303 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In summary, the main contributions are as follows:", + "bbox": [ + 107, + 304, + 444, + 318 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We identify the negative impacts of contrastive decoding methods on both the quality of generated content and model inference speed.", + "- We analyze the modality fusion mechanism in MLLMs, highlighting its insufficient attention to visual information.", + "- We introduce the VAF method, which effectively mitigates the object hallucination problem while maintaining inference speed, coherence, and accuracy.", + "- We demonstrate the significant performance improvements of the VAF method across multiple object hallucination benchmarks." + ], + "bbox": [ + 89, + 319, + 482, + 500 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related work", + "text_level": 1, + "bbox": [ + 89, + 516, + 227, + 532 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1. Multimodal Large Language Models", + "text_level": 1, + "bbox": [ + 89, + 541, + 408, + 558 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The development of MLLMs [26, 36, 51, 52] has advanced from BERT-based decoders to LLM-based architectures [4, 11, 40, 43-45], enabling improved multimodal relationship capture [6, 10, 24, 25]. Models like BLIP-2 [27] and miniGPT-4 [55] incorporate a Q-Former mechanism, which enhances the alignment between visual and textual inputs, allowing for more precise interactions across modalities. InstructBLIP [12] builds on this approach by adding task-specific instructions, which improve the model's understanding of context-sensitive visual semantics. LLaVA [33] and Qwen-VL [5] utilize simpler linear projection techniques that streamline the alignment process, resulting in improved overall performance on vision-language tasks. 
However, hallucination issues persist across MLLMs, posing a significant challenge that requires further research.", + "bbox": [ + 89, + 564, + 482, + 791 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.2. Contrastive Decoding Strategies", + "text_level": 1, + "bbox": [ + 89, + 801, + 372, + 819 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In recent years, Contrastive Decoding [19, 21, 22, 28] has emerged as a technique to improve generative model accuracy through contrastive judgment, widely employed to address hallucinations in generated content. For instance, Visual Contrastive Decoding (VCD) [23] contrasts output", + "bbox": [ + 89, + 824, + 482, + 901 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "distributions derived from original and distorted visual inputs, effectively reducing the over-reliance on statistical bias and unimodal priors, two essential causes of object hallucinations. Similarly, Instruction Contrastive Decoding (ICD) [47] works by comparing distributions derived from standard and disrupted instructions, thereby removing hallucinated concepts from the original distribution. These contrastive methods help ground generated content closely to visual inputs, resulting in contextually accurate outputs. However, despite these advancements, contrastive decoding faces two primary limitations: slower inference speed and reduced coherence in generated content. To overcome these limitations, we propose the VAF method, which achieves effective hallucination reduction while preserving both inference speed and content coherence.", + "bbox": [ + 511, + 90, + 906, + 318 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3. Preliminary and Motivation", + "text_level": 1, + "bbox": [ + 511, + 329, + 772, + 345 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In Sec. 3.1, we illustrate the working mechanism of contrastive decoding to mitigate hallucinations, using Visual Contrastive Decoding as an example. In Sec. 3.2, we analysis two main drawbacks of this approach: its potential to disrupt the coherence and accuracy of generated content, and its tendency to slow down model inference.", + "bbox": [ + 511, + 354, + 906, + 446 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3.1. Contrastive Decoding", + "text_level": 1, + "bbox": [ + 511, + 453, + 718, + 469 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We consider a MLLM parametrized by $\\theta$ . The model takes as input a textual query $x$ and a visual input $v$ , where $v$ provides contextual visual information to assist the model in generating a relevant response $y$ to the textual query. The response $y$ is sampled auto-regressively from the probability distribution conditioned on the query $x$ and the visual context $v$ . 
Mathematically, this can be formulated as:", + "bbox": [ + 511, + 474, + 905, + 580 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} y _ {t} \\sim p _ {\\theta} \\left(y _ {t} \\mid v, x, y _ {< t}\\right) \\tag {1} \\\\ \\propto \\exp \\operatorname {l o g i t} _ {\\theta} \\left(y _ {t} \\mid v, x, y _ {< t}\\right) \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 604, + 589, + 903, + 625 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "where $y_{t}$ denotes the token at time step $t$ , and $y_{< t}$ represents the sequence of generated tokens up to the time step $(t - 1)$ .", + "bbox": [ + 511, + 632, + 905, + 662 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To mitigate the issue of object hallucination in MLLMs, contrastive decoding techniques can be applied. Here, we present Visual Contrastive Decoding (VCD) as a representative approach, shown in Fig. 1. Specifically, given a textual query $x$ and a visual input $v$ , the model generates two distinct output distributions: one conditioned on the original $v$ and the other on the distorted visual input $v'$ , which is derived by applying pre-defined distortions (i.e., Gaussian noise mask) to $v$ . Then, a new contrastive probability distribution is computed by exploiting the differences between the two initially obtained distributions. The new contrastive distribution $p_{vcd}$ is formulated as:", + "bbox": [ + 511, + 662, + 906, + 844 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} p _ {v c d} (y \\mid v, v ^ {\\prime}, x) = \\text {s o f t m a x} \\left[ \\operatorname {l o g i t} _ {\\theta} (y \\mid v, x) + \\right. \\tag {2} \\\\ \\left. \\alpha \\cdot \\left(\\operatorname {l o g i t} _ {\\theta} (y \\mid v, x) - \\operatorname {l o g i t} _ {\\theta} (y \\mid v ^ {\\prime}, x)\\right) \\right], \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 535, + 849, + 903, + 904 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "where larger $\\alpha$ values indicate a stronger amplification of differences between the two distributions ( $\\alpha = 0$ reduces to regular decoding). Essentially, VCD serves as a corrective mechanism, reducing hallucinations by contrasting against a distribution predisposed to favoring them.", + "bbox": [ + 89, + 90, + 483, + 167 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/4758f95feb2921349b4687274e578b427177cad7402749623729eb7e539198b3.jpg", + "image_caption": [ + "Figure 1. Illustration of Visual Contrastive Decoding. The hallucinated object \"Teacher\" is suppressed by contrasting with an output distribution prone to hallucinations. This method has two main drawbacks: (1) additional processing of distorted visual inputs greatly increases inference time; (2) subtracting the language prior disrupts content coherence." + ], + "image_footnote": [], + "bbox": [ + 96, + 179, + 478, + 324 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2. Limitations of Contrastive Decoding", + "text_level": 1, + "bbox": [ + 89, + 443, + 406, + 459 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "As contrastive decoding methods do not require training or external tools, they offer high computational efficiency and generalizability, attracting significant attention in academia. 
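Equation (2) above is straightforward to express in code. The sketch below assumes a hypothetical `mllm_logits` wrapper that returns next-token logits for a given visual input; it is not any library's actual API.

```python
# Sketch of one VCD decoding step (Eq. 2). mllm_logits stands in for
# logit_theta(y | v, x, y_<t) of an MLLM; alpha = 0 recovers regular decoding.
import torch.nn.functional as F

def vcd_step(mllm_logits, v, v_distorted, x, y_prev, alpha=1.0):
    logits = mllm_logits(v, x, y_prev)                  # original visual input
    logits_dist = mllm_logits(v_distorted, x, y_prev)   # e.g., Gaussian-noised image
    # Amplify what the clean image supports over what the distorted one supports.
    contrastive = logits + alpha * (logits - logits_dist)
    return F.softmax(contrastive, dim=-1)               # p_vcd(y | v, v', x)
```

Note the second forward pass over the distorted input: this is precisely what roughly doubles the per-token cost reported for VCD.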
However, these methods still have two major drawbacks: a reduction in the quality of generated content and slower inference speed.", + "bbox": [ + 89, + 465, + 483, + 556 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/ba9b2e85d431481e3a8e32ea539a3d303b45e8f405ca48188412fd89af77be26.jpg", + "image_caption": [ + "Figure 2. Impact of VCD on Model Performance. CIDEr scores are reported on the Nocaps benchmark, while Accuracy is presented for the ScienceQA benchmark. The use of VCD leads to a significant decline in model performance." + ], + "image_footnote": [], + "bbox": [ + 120, + 571, + 444, + 741 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "While contrasting logits of $p_{\\theta}(y \\mid v, x)$ and $p_{\\theta}(y \\mid v', x)$ can help reduce over-reliance on language priors and mitigate hallucination in MLLMs-as evidenced by a $4\\%$ performance gain on the POPE benchmark using the VCD method-merely decreasing the influence of the language", + "bbox": [ + 89, + 824, + 483, + 902 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "modality on the output distribution may undermine the coherence of the generated content, potentially leading to prediction errors. This issue is less pronounced in straightforward object hallucination tasks, where responses are limited to binary options, such as \"yes\" or \"no\". However, in more complex tasks, including multiple-choice question answering and image caption, the impact of contrastive learning methods on content quality becomes more significant.", + "bbox": [ + 511, + 90, + 903, + 210 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To verify this, we applied VCD method to LLaVA-v1.5-7B and LLaVA-v1.5-13B models, assessing their performance on the ScienceQA [38] and NoCaps benchmarks. As illustrated in Fig. 2, our findings reveal that, following the application of VCD, model performance decreased by $5\\%$ on ScienceQA and by a considerable $45\\%$ on NoCaps. These results suggest that in tasks requiring nuanced natural language generation, contrastive decoding methods can substantially impair content quality.", + "bbox": [ + 511, + 210, + 906, + 348 + ], + "page_idx": 2 + }, + { + "type": "table", + "img_path": "images/f881ed1da6ab97af7e3a403c5126e7aeccbba2ec8e37e759b4095a3ea379329b.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
<table><tr><td>Model</td><td>Method</td><td>ScienceQA</td><td>Nocaps</td></tr>
<tr><td rowspan="2">LLaVA-v1.5-7B</td><td>Regular</td><td>0.141s</td><td>0.456s</td></tr>
<tr><td>VCD</td><td>0.293s</td><td>1.086s</td></tr>
<tr><td rowspan="2">LLaVA-v1.5-13B</td><td>Regular</td><td>0.222s</td><td>0.602s</td></tr>
<tr><td>VCD</td><td>0.459s</td><td>1.372s</td></tr></table>
", + "bbox": [ + 529, + 358, + 890, + 465 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Table 1. Impact of VCD on Model Inference Speed. The table shows the average inference time per sample (in seconds) on the ScienceQA and Nocaps benchmarks. Applying the VCD method nearly doubled the inference time of the model.", + "bbox": [ + 511, + 474, + 906, + 531 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Contrastive decoding methods notably reduce inference speed because they require calculating the output distribution for additional contrastive samples. For instance, in VCD method, each visual input $v$ necessitates computing the logits of both $p_{\\theta}(y \\mid v, x)$ and $p_{\\theta}(y \\mid v', x)$ separately. This doubles the computation load during inference compared to vanilla decoding. We evaluated the inference speed of VCD versus vanilla decoding on ScienceQA. The experimental results, shown in Tab. 1, reveal that VCD's inference time is almost double that of vanilla decoding.", + "bbox": [ + 511, + 546, + 905, + 696 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "4. Visual Neglect in Modal Fusion", + "text_level": 1, + "bbox": [ + 511, + 709, + 802, + 726 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The primary objective of this section is to examine why MLLMs tend to rely excessively on language priors in their predictions. In Sec. 4.1, saliency analysis reveals that image tokens influence prediction outcomes mainly through interactions with instruction tokens within the middle layers. Sec. 4.2 then compares attention weights across different modalities, showing that the attention given to visual features is notably lower than that allocated to system prompts and user instructions. These findings indicate that visual information is often underutilized in the modality fusion process, resulting in an over-reliance on language priors.", + "bbox": [ + 511, + 734, + 906, + 902 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "4.1. Mid-layer: Visual-Language Fusion", + "text_level": 1, + "bbox": [ + 89, + 90, + 403, + 107 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To uncover why MLLMs tend to overly rely on language priors and overlook visual content in prediction, it is necessary first to clarify how the model utilizes visual modality information. This section explores the influence of the visual modality on prediction outcomes from the perspective of visual information interaction.", + "bbox": [ + 89, + 113, + 482, + 203 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We employ the saliency technique, a widely used interpretability tool, to highlight key token interactions within the attention mechanism. Following established practices, we utilize Taylor expansion to compute saliency scores for each element of the attention matrix:", + "bbox": [ + 89, + 205, + 483, + 280 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nI _ {l} = \\left| \\sum_ {h} A _ {h, l} \\odot \\frac {\\partial \\mathcal {L} (x)}{\\partial A _ {h , l}} \\right|. \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 196, + 291, + 482, + 333 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Here, $A_{h,l}$ represents the attention matrix value for the $h$ -th attention head in the $l$ -th layer, $x$ denotes the input, and $\\mathcal{L}(x)$ is the loss function of the task, e.g., the cross-entropy objective for question-answering tasks. 
The saliency matrix $I_{l}$ for the $l$ -th layer is obtained by averaging across all attention heads. The significance of information flow from the $j$ -th token to the $i$ -th token in MLLMs is represented by $I_{l}(i,j)$ .", + "bbox": [ + 89, + 345, + 483, + 465 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To draw a clearer picture of visual information flow in MLLMs, we introduce two quantitative metrics based on $I_{l}(i,j)$ , with a particular focus on the information interaction involving image tokens. The definitions of the two quantitative metrics follow below.", + "bbox": [ + 89, + 467, + 483, + 542 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "$S_{vv}$ , measuring the importance of information flow among image tokens:", + "bbox": [ + 89, + 542, + 483, + 573 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nS _ {v v} = \\frac {\\sum_ {(i , j) \\in C _ {v v}} I _ {l} (i , j)}{\\left| C _ {v v} \\right|} \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 179, + 585, + 480, + 621 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nC _ {v v} = \\{(i, j): i, j \\in \\mathcal {V}, i \\geq j \\}.\n$$\n", + "text_format": "latex", + "bbox": [ + 179, + 625, + 393, + 641 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "$S_{vt}$ , measuring the importance of information flow from image tokens to instruction tokens:", + "bbox": [ + 89, + 652, + 483, + 684 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nS _ {v t} = \\frac {\\sum_ {(i , j) \\in C _ {v t}} I _ {l} (i , j)}{\\left| C _ {v t} \\right|} \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 184, + 696, + 482, + 732 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nC _ {v t} = \\{(i, j): i \\in \\mathcal {T}, j \\in \\mathcal {V} \\}.\n$$\n", + "text_format": "latex", + "bbox": [ + 184, + 734, + 388, + 751 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Here, $\\mathcal{V}$ represents the index set of image tokens, derived from features learned by pre-trained visual encoders, while $\\mathcal{T}$ denotes the index set of instruction tokens, specifying requests or questions related to the images. $S_{vv}$ and $S_{vt}$ are utilized to analyze the mechanisms of visual information processing in MLLMs. We define attention interactions among image tokens as intra-visual information flow and those between image and instruction tokens as visual-textual information flow.", + "bbox": [ + 89, + 763, + 483, + 901 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We conducted experiments with the LLaVA-v1.5-7B model on the MS COCO dataset under the POPE benchmark, sampling 500 examples for evaluation. Fig. 3 underscores the critical role of the visual-textual information flow within the model's middle layers, specifically from the 8-th to the 15-th layer. This observation indicates that in these layers, visual information interacts intensively with textual information via attention mechanisms, which substantially influences the prediction outcomes.", + "bbox": [ + 511, + 90, + 906, + 227 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/591b4f3e08189d0211b7d6030a04e8d5102e8a900d3a71f513acb19ff213f9c2.jpg", + "image_caption": [ + "Figure 3. The importance of intra-visual flow and visual-textual flow across various layers. The visual-textual information flow in the middle layers has a significant impact on prediction outcomes." 
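Once the attention maps and their gradients have been captured, the flow metrics of Eqs. (3)-(5) reduce to a few tensor operations. The sketch below is schematic rather than the authors' released code; it assumes sorted index lists for image and instruction tokens and per-layer attention tensors of shape (heads, seq, seq).

```python
# Schematic computation of Eqs. (3)-(5). attn and attn_grad are one layer's
# attention maps and their gradients w.r.t. the task loss, shape (heads, seq, seq).
import torch

def saliency_flows(attn, attn_grad, vis_idx, txt_idx):
    I_l = (attn * attn_grad).sum(dim=0).abs()          # Eq. (3), summed over heads
    v = torch.tensor(vis_idx)                          # assumed sorted ascending
    t = torch.tensor(txt_idx)
    intra = I_l[v][:, v].tril()                        # image->image, causal i >= j
    s_vv = intra.sum() / (len(v) * (len(v) + 1) / 2)   # Eq. (4)
    s_vt = I_l[t][:, v].mean()                         # Eq. (5), image->instruction
    return s_vv.item(), s_vt.item()
```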
+ ], + "image_footnote": [], + "bbox": [ + 537, + 244, + 879, + 411 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.2. Attention Imbalance Across Modalities", + "text_level": 1, + "bbox": [ + 511, + 476, + 849, + 491 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Sec. 4.1 reveals that the middle layers facilitate crucial fusion, integrating visual and textual inputs into cross-modal semantic representations that drive final predictions. Accordingly, this section will delve deeper into the attention to visual inputs throughout the modality fusion process.", + "bbox": [ + 511, + 498, + 905, + 574 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/5a8021e71dc27b5039408120018516907f081adc2b0b7a6b99e3f5b145ca2cf7.jpg", + "image_caption": [ + "Figure 4. Attention Distribution of Modal Information Across Model Layers. In the middle layers, the model allocates insufficient attention to visual features while disproportionately focusing on system prompts." + ], + "image_footnote": [], + "bbox": [ + 524, + 590, + 880, + 773 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We define the attention allocation, $\\lambda$ , as the aggregate attention score assigned to a specific type of token within a single layer. Accordingly, the attention allocation for sys", + "bbox": [ + 511, + 854, + 905, + 901 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "tem prompts, visual features, and user instructions in the $l$ -th layer can be computed as follows:", + "bbox": [ + 89, + 90, + 482, + 121 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\lambda_ {s y s} ^ {l} = \\sum_ {i \\in \\mathcal {T}} \\sum_ {j \\in \\mathcal {S}} A _ {l} (i, j),\n$$\n", + "text_format": "latex", + "bbox": [ + 207, + 133, + 367, + 166 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\lambda_ {v i s} ^ {l} = \\sum_ {i \\in \\mathcal {T}} \\sum_ {j \\in \\mathcal {V}} A _ {l} (i, j), \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 207, + 170, + 482, + 202 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\lambda_ {i n s} ^ {l} = \\sum_ {i \\in \\mathcal {T}} \\sum_ {j \\in \\mathcal {T}} A _ {l} (i, j).\n$$\n", + "text_format": "latex", + "bbox": [ + 207, + 205, + 367, + 239 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In this context, $A_{l}$ represents the attention matrix averaged across all attention heads, while $S$ represents the indices of system tokens. The measures $\\lambda_{sys}^{l}, \\lambda_{vis}^{l}$ , and $\\lambda_{ins}^{l}$ provide insight into the distribution of attention to different modalities across various layers, aiding in understanding the reasons for the underutilization of visual information during the modality fusion process.", + "bbox": [ + 89, + 244, + 483, + 349 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The experimental setup aligns with that described in Sec. 4.1. Fig. 4 illustrates the allocation of attention to different modalities across the model's layers. In the middle layers, attention to visual features is markedly lower than that given to system prompts and user instructions. This suggests that during the critical process of modality fusion, the model's focus on visual input is insufficient. As a result, visual information is underutilized, leading to an output distribution skewed toward language priors.", + "bbox": [ + 89, + 351, + 483, + 488 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.3. 
Insights", + "text_level": 1, + "bbox": [ + 89, + 497, + 189, + 513 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Based on the experimental results presented in Sec. 4.1 and Sec. 4.2, two significant conclusions can be drawn:", + "bbox": [ + 89, + 520, + 482, + 550 + ], + "page_idx": 4 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- The model performs the crucial fusion of visual and textual modalities in the middle layers, creating cross-modal semantic representations that drive the final predictions.", + "- During this critical fusion process, the model demonstrates inadequate attention to the visual modality." + ], + "bbox": [ + 89, + 553, + 482, + 628 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "These findings indicate that models fail to fully utilize visual information, resulting in an excessive dependence on language priors and, subsequently, the occurrence of hallucination phenomena.", + "bbox": [ + 89, + 631, + 483, + 691 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "5. Visual Amplification Fusion", + "text_level": 1, + "bbox": [ + 89, + 705, + 349, + 724 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Building on the insights presented in Sec. 4, we introduce a hallucination mitigation method called Visual Amplification Fusion (VAF). As illustrated in Fig. 5, This approach heightens attention to visual information during modality fusion, effectively reducing the excessive dependency on language priors and ensuring that the generated content is closely grounded to visual inputs.", + "bbox": [ + 89, + 731, + 483, + 838 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "5.1. Attention Redistribution", + "text_level": 1, + "bbox": [ + 89, + 848, + 316, + 862 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "As outlined in Sec. 4, the model performs crucial fusion of visual and textual modalities within the middle layers.", + "bbox": [ + 89, + 869, + 483, + 901 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/a79b173bab09191aa22706ca508242c6b90a6e6be41e3fc5b3e73e81ffc48739.jpg", + "image_caption": [ + "Figure 5. Illustration of the Visual Amplification Fusion Method. In the middle layers, we select attention heads highly responsive to visual information, amplifying their focus on visual features while reducing unnecessary attention to system prompts." + ], + "image_footnote": [], + "bbox": [ + 532, + 88, + 883, + 239 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "However, the attention allocated to visual modality information during this process remains insufficient. To address this, we adjust the attention weights in these layers to achieve a more balanced focus.", + "bbox": [ + 511, + 323, + 906, + 383 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/cd4f57fe7e81f9e8b3921d58b7979766e51164cd331cbd4c3783c2b76d61d9c5.jpg", + "image_caption": [ + "Figure 6. Effect of Enhanced Visual Attention on Hallucination Suppression. Increasing attention to visual features in the fusion process of the model's middle layers successfully reduces hallucinations, enabling the model to correct its grape color prediction from \"green\" to \"red\"." 
+ ], + "image_footnote": [], + "bbox": [ + 532, + 392, + 885, + 555 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Let $A_{l,h}$ denote the attention matrix of the $h$ -th attention head in the $l$ -th layer, and $Z_{l,h}$ represent its corresponding attention score matrix, defined as:", + "bbox": [ + 511, + 652, + 905, + 698 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nA _ {l, h} = \\operatorname {s o f t m a x} \\left(Z _ {l, h}\\right). \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 627, + 707, + 903, + 723 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Our objective during the modality fusion process is to amplify the model's attention to visual features while curbing an overemphasis on system prompts. This adjustment facilitates improved integration of visual information and reduces over-reliance on language priors. To achieve this, we modify the attention score matrix in the middle layers (i.e., $8 < l < 15$ ) as follows:", + "bbox": [ + 511, + 731, + 906, + 837 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {Z} _ {l, h} = Z _ {l, h} + \\alpha \\cdot M _ {l, h} ^ {\\text {e n h}} \\circ Z _ {l, h} - \\beta \\cdot M _ {l, h} ^ {\\text {s u p}} \\circ Z _ {l, h}. \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 531, + 844, + 903, + 864 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Here, $\\alpha$ is the enhancement coefficient ( $\\alpha > 0$ ), where larger values indicate stronger amplification of visual attenuation.", + "bbox": [ + 511, + 869, + 906, + 901 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "tion. The suppression coefficient $\\beta$ ( $0 < \\beta < 1$ ) determines the extent of attention suppression directed at system prompts. The enhancement and suppression mask matrices, $M_{l,h}^{enh}$ and $M_{l,h}^{sup}$ respectively, are defined to guide the modulation of attention elements:", + "bbox": [ + 89, + 90, + 480, + 165 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} M _ {l, h} ^ {e n h} (i, j) = \\mathbb {I} (i \\in \\mathcal {T}, j \\in \\mathcal {V}), \\\\ 1. 5 ^ {\\text {s u p}} (i, j) = \\mathbb {I} (i = \\mathcal {T}, j = \\mathcal {Q}). \\end{array} \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 181, + 172, + 482, + 199 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nM _ {l, h} ^ {s u p} (i, j) = \\mathbb {I} (i \\in \\mathcal {T}, j \\in \\mathcal {S}).\n$$\n", + "text_format": "latex", + "bbox": [ + 186, + 193, + 388, + 212 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "These modifications optimize attention allocation by enhancing the model's focus on visual features during modality fusion and minimizing superfluous attention to system prompts. As illustrated in Fig. 6, preliminary analysis indicates that this approach effectively mitigates hallucination issues by promoting greater attention to visual information.", + "bbox": [ + 89, + 220, + 483, + 311 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.2. Visual Perception Restriction", + "text_level": 1, + "bbox": [ + 89, + 319, + 352, + 335 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Enhancing visual attention across all attention heads in the middle layers can be overly aggressive and may negatively impact content generation. To address this, we propose a selective enhancement strategy. 
Specifically, we identify and isolate the attention heads that exhibit higher sensitivity to visual information, which we term visual perception heads. We then restrict the visual attention enhancement to these visual perception heads, ensuring better utilization of visual information while maintaining overall model performance.", + "bbox": [ + 89, + 340, + 483, + 477 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In the model, attention heads that allocate more attention to visual features demonstrate heightened sensitivity to visual information. Let $A_{l,h}$ represent the attention matrix of the $h$ -th attention head in the $l$ -th layer of the model, with its corresponding visual attention allocation denoted by $\\lambda_{\\mathrm{vis}}^{l,h}$ . In each attention layer, we identify the attention heads whose visual attention allocation falls within the top", + "bbox": [ + 89, + 478, + 483, + 583 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "$50\\%$ and designate them as visual perception heads, subsequently redistributing their attention. The attention matrices of the remaining attention heads are kept unchanged.", + "bbox": [ + 511, + 90, + 905, + 137 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "6. Experiment", + "text_level": 1, + "bbox": [ + 511, + 150, + 638, + 167 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "This section demonstrates the effectiveness of the proposed VAF method in mitigating hallucinations. Sec. 6.1 outlines the experimental setup, detailing the evaluation benchmarks and VAF parameter configurations. Sec. 6.2 then presents the experimental results from three perspectives: reduction of hallucinations, coherence of generated content, and inference speed. Finally, Sec. 6.3 further verifies the contribution of each VAF component through ablation studies.", + "bbox": [ + 511, + 176, + 906, + 297 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "6.1. Experimental Settings", + "text_level": 1, + "bbox": [ + 511, + 306, + 720, + 323 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In Sec. 6.1.1, we present the selected datasets and evaluation metrics. Sec. 6.1.2 details the chosen MLLM backbone models, and Sec. 6.1.3 outlines the baseline settings.", + "bbox": [ + 511, + 329, + 905, + 375 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "6.1.1. Datasets & Evaluation Metrics", + "text_level": 1, + "bbox": [ + 511, + 383, + 772, + 397 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Polling-based Object Probing Evaluation (POPE). POPE [30] is a novel framework designed to evaluate object hallucinations in MLLMs. Departing from traditional caption-based approaches, POPE frames hallucination detection as a binary task by posing straightforward yes-or-no questions regarding the presence of specific objects in an image (e.g., \"Is there a chair in the image?\"). Performance on POPE is measured across four metrics: Accuracy, Precision, Recall, and F1 score, allowing for a thorough evaluation of hallucinations in MLLMs.", + "bbox": [ + 511, + 402, + 905, + 551 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Multimodal Model Evaluation (MME). The MME [13] benchmark provides a comprehensive framework for evalu", + "bbox": [ + 511, + 551, + 905, + 584 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/8b5a7597a40413b0b625d92fa3f609bf4ba008a8e7e335f5c1d5bac48951892a.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
CategoryMethodLLaVA-v1.5-7BLLaVA-v1.5-13BQwen-VL-Chat-7B
AccuracyF1-scoreAccuracyF1-scoreAccuracyF1-score
RandomRegular87.8↑0.087.5↑0.087.6↑0.087.4↑0.088.2↑0.087.9↑0.0
VCD88.4↑0.687.7↑0.288.9↑1.387.8↑0.489.1↑0.988.4↑0.5
ICD88.1↑0.387.6↑0.188.1↑0.587.6↑0.288.9↑0.788.1↑0.2
VAF89.6↑1.889.3↑1.890.1↑2.589.9↑2.590.0↑1.889.7↑1.8
PopularRegular82.5↑0.083.2↑0.082.7↑0.084.1↑0.082.4↑0.083.1↑0.0
VCD83.1↑0.684.1↑0.983.7↑1.085.1↑1.083.0↑0.684.1↑1.0
ICD82.1↓0.482.9↓0.382.9↑0.284.3↑0.283.2↑0.884.5↑1.4
VAF84.5↑2.084.9↑1.785.2↑2.586.4↑2.384.9↑2.585.1↑2.0
AdversarialRegular77.6↑0.079.4↑0.077.8↑0.079.5↑0.077.2↑0.078.9↑0.0
VCD78.1↑0.579.6↑0.278.2↑0.479.7↑0.278.8↑1.680.1↑1.2
ICD78.5↑0.979.9↑0.579.1↑1.380.1↑0.678.1↑0.979.2↑0.3
VAF80.1↑2.581.0↑1.680.7↑2.981.7↑2.280.4↑3.281.2↑2.3
", + "bbox": [ + 163, + 604, + 841, + 857 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 2. Performance on POPE. Results are averaged across the MS-COCO, A-OKVQA, and GQA datasets. The VAF method demonstrates superior hallucination suppression across all three MLLMs. The best performance for each setting is highlighted in red.", + "bbox": [ + 89, + 867, + 905, + 897 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/d5ad6983b6128eeec1e4e9721d200c6bac668bca9bea7e5ef70ec2c07258095b.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ModelMethodObject-levelAttribute-levelTotal Score
ExistenceCountPositionColor
LLaVA-v1.5-7BRegular185.00↑0.00146.67↑0.00128.33↑0.00150.00↑0.00610.00↑0.00
VCD185.00↑0.00141.33↓5.34128.33↑0.00153.00↑3.00607.66↓2.34
ICD185.00↑0.00148.33↑1.66126.66↓1.67148.33↓1.67608.32↓1.68
VAF195.00↑10.00158.33↑11.66128.33↑0.00155.00↑5.00636.67↑26.67
LLaVA-v1.5-13BRegular185.00↑0.00155.00↑0.00133.33↑0.00165.00↑0.00638.33↑0.00
VCD185.00↑0.00155.00↑0.00130.00↓3.33168.33↑3.33638.33↑0.00
ICD183.33↓1.67153.33↓1.67131.67↓1.66165.00↑0.00633.33↓5.00
VAF195.00↑10.00160.00↑5.00136.67↑3.34170.00↑5.00661.67↑23.34
Qwen-VL-7BRegular158.33↑0.00150.00↑0.00128.33↑0.00170.00↑0.00606.66↑0.00
VCD158.33↑0.00150.00↑0.00133.33↑5.00175.00↑5.00616.66↑10.00
ICD128.33↓30.00151.67↑1.67128.33↑0.00170.00↑0.00578.33↓28.33
VAF165.00↑6.67155.00↑5.00133.33↑5.00175.00↑5.00628.33↑21.67
", + "bbox": [ + 112, + 88, + 890, + 343 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 3. Results on the MME subset. Across three MLLMs, the VAF method achieved the most effective suppression of both object-level and attribute-level hallucinations. The highest scores in each setting are highlighted in red.", + "bbox": [ + 89, + 352, + 906, + 383 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "ating MLLMs across both perceptual and cognitive dimensions. It consists of ten perception-oriented tasks and four cognition-oriented tasks, with model performance assessed through accuracy metrics. In addition to the full dataset, we leverage specific subsets, such as object existence and counting to analyze object-level hallucinations, while position and color subsets are employed to examine attribute-level hallucinations.", + "bbox": [ + 88, + 407, + 480, + 527 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Novel Object Captioning at Scale (Nocaps). NoCaps [3] benchmark is designed to evaluate image captioning models on their ability to describe novel objects absent from standard datasets like COCO. Model performance is quantified using the CIDEr score, providing a basis to assess the coherence and accuracy of generated captions in response to images containing unfamiliar objects.", + "bbox": [ + 88, + 527, + 482, + 635 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "6.1.2. MLLM Backbones", + "text_level": 1, + "bbox": [ + 89, + 643, + 269, + 657 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In comparison to the Q-former structure, linear projection demonstrates greater efficiency in aligning visual and textual features. This advantage is evident in MLLMs with linear projection architectures, such as LLaVA and Qwen-VL, which outperform Q-former-based MLLMs like Instruct-BLIP and MiniGPT4. Based on these findings, we selected three linear-projection-based MLLMs, specifically LLaVA-v1.5-7B, LLaVA-v1.5-13B [35], and Qwen-VL-7B [5], to evaluate the effectiveness of our proposed VAF method. Detailed prompt templates for each model across various benchmarks are included in Sec. 10.", + "bbox": [ + 88, + 661, + 482, + 828 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "6.1.3. Baseline Settings.", + "text_level": 1, + "bbox": [ + 89, + 835, + 259, + 852 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We primarily compared our approach to the VCD [23] and ICD [47] methods. VCD mitigates hallucinations by contrasting output distributions derived from original and dis", + "bbox": [ + 89, + 854, + 483, + 902 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "torted visual inputs, while ICD reduces hallucinated concepts by comparing distributions generated with standard versus disrupted instructions. To ensure consistency and reproducibility in our comparisons, all methods use greedy search. Unless specified otherwise, our experiments set $\\beta = 0.1$ and $\\alpha = 0.15$ .", + "bbox": [ + 511, + 407, + 906, + 500 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "6.2. Results and Analysis", + "text_level": 1, + "bbox": [ + 511, + 508, + 709, + 523 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Sec. 6.2.1 examines the effectiveness of various methods in mitigating hallucinations, while Sec. 6.2.2 assesses their impact on the quality of generated content. Sec. 6.2.3 then analyzes the influence of each method on inference speed. Additional experimental results are provided in Sec. 
8.", + "bbox": [ + 511, + 530, + 906, + 607 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "6.2.1. Hallucination Mitigation", + "text_level": 1, + "bbox": [ + 511, + 613, + 732, + 628 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Tab. 2 presents the experimental results of the VAF method on the POPE benchmark, with results averaged across the MSCOCO [31], A-OKVQA [42], and GQA [20] datasets. Applied to both the LLaVA-v1.5 model family and the Qwen-VL model, the VAF method consistently surpasses the VCD and ICD methods in reducing hallucinations. Tab. 3 further highlights the performance of VAF on the MME benchmark, demonstrating its effectiveness in suppressing both object-level and attribute-level hallucinations.", + "bbox": [ + 511, + 632, + 905, + 770 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "6.2.2. Coherence of Generated Content", + "text_level": 1, + "bbox": [ + 511, + 776, + 790, + 789 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Tab. 4 presents the experimental results for various methods on the Nocaps and ScienceQA datasets. It is evident that VCD and ICD substantially degrade the quality of the generated content. Specifically, on the Nocaps dataset, VCD and ICD reduce CIDEr scores by $18\\%$ and $27\\%$ , respectively. This degradation primarily arises from the crude disruption of language priors by contrastive decoding methods,", + "bbox": [ + 511, + 794, + 906, + 902 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/ed04285b06690a6f4f7556e8adf2d935197d4d1b0fc3e108bbbb0426666d9aa5.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ModelDecodingScienceQANocaps
AccuracyCIDEr
LLaVA-v1.5-7BRegular68.078.7
VCD64.565.7
ICD62.462.3
VAF68.578.8
LLaVA-v1.5-13BRegular71.682.6
VCD70.068.9
ICD69.260.3
VAF71.782.3
", + "bbox": [ + 101, + 89, + 478, + 275 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "which leads to generated content lacking coherence and accuracy. By contrast, our method demonstrates minimal negative impact on prediction results, maintaining both coherence and accuracy effectively.", + "bbox": [ + 89, + 340, + 482, + 402 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6.2.3. Inference Speed", + "text_level": 1, + "bbox": [ + 89, + 409, + 250, + 425 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Fig. 7 illustrates the impact of different strategies on model inference speed within the Nocaps dataset. In comparison, the VCD and ICD methods nearly double the inference time due to the need to process contrastive input samples, whereas the VAF method has minimal impact on the inference speed of multimodal large language models.", + "bbox": [ + 89, + 428, + 483, + 518 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/a52934bd49c85c2eeb682f6e85fd76b33397436a158a1df077b2d7eddeb2b2c9.jpg", + "image_caption": [ + "Figure 7. Comparison of different strategies on inference speed. The VCD and ICD methods reduce inference speed by $50\\%$ , whereas the VAF method shows minimal impact." + ], + "image_footnote": [], + "bbox": [ + 112, + 537, + 442, + 714 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6.3. Ablation Study", + "text_level": 1, + "bbox": [ + 89, + 787, + 243, + 804 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Ablation studies on the enhancement coefficient $\\alpha$ were conducted using the COCO-Random dataset within the POPE benchmark to understand its influence on model performance. Fig. 8 demonstrates that when $0 < \\alpha < 0.25$ , model hallucinations are effectively suppressed. However, when $\\alpha$ surpasses 0.25, performance starts to degrade. We", + "bbox": [ + 89, + 810, + 483, + 901 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "propose that this reduction in performance may stem from an excessive focus on visual features, disrupting the balanced integration of language information and diminishing overall model effectiveness.", + "bbox": [ + 511, + 90, + 905, + 151 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/43f9cde0505695001d35da1fe6336854987e8cb1c5daf157a6cf91f7c5f9b531.jpg", + "image_caption": [ + "Figure 8. Ablation study of $\\alpha$ on the POPE benchmark." + ], + "image_footnote": [], + "bbox": [ + 537, + 162, + 879, + 342 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We performed ablation studies on the visual perception restriction mechanism, evaluating its impact on the POPE and Nocaps benchmarks. Tab. 5 highlights the effects of restricting attention reallocation to visual perception heads. Increasing attention to visual features alone reduces model hallucinations, while confining this reallocation strategy to visual perception heads minimizes adverse effects on content quality. More ablation studies can be found in Sec. 9.", + "bbox": [ + 511, + 371, + 906, + 492 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/df3d04f20204c760b313ec2c84457e3343648bd94b9f015e7e1ca4671fe51867.jpg", + "table_caption": [ + "Table 4. Results on SQA and Nocaps datasets. The highest and second-highest scores are marked in red and blue, respectively." + ], + "table_footnote": [], + "table_body": "
ModelVisual RestrictionPOPENocaps
LLaVA-7B89.878.8
X89.976.4
LLaVA-13B90.282.3
X90.081.1
", + "bbox": [ + 527, + 503, + 893, + 608 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 5. Ablation Study of Visual Perception Restriction Mechanism. Restricting attention redistribution to the visual perception heads more effectively preserves the quality of generated content.", + "bbox": [ + 511, + 619, + 905, + 662 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "7. conclusion", + "text_level": 1, + "bbox": [ + 513, + 679, + 627, + 694 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this paper, we identify two key drawbacks of using contrastive decoding to mitigate hallucinations in MLLMs: reduced quality of generated content and slower inference speed. To address these challenges, we propose a novel approach, Visual Amplification Fusion, which effectively mitigates hallucinations while preserving both inference speed and content generation quality. By enhancing the attention to visual features during modality fusion, VAF minimizes the over-reliance on language priors, ensuring a high degree of consistency between generated content and visual inputs. Extensive experiments across multiple benchmarks and MLLMs demonstrate that VAF provides a clear advantage in hallucination mitigation.", + "bbox": [ + 511, + 704, + 906, + 900 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgements", + "text_level": 1, + "bbox": [ + 91, + 90, + 258, + 107 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "This work is supported by the National Natural Science Foundation of China under Grant 62176246. This work is also supported by Anhui Province Key Research and Development Plan (202304a05020045), Anhui Province Natural Science Foundation (2208085UD17) and National Natural Science Foundation of China under Grant 62406098.", + "bbox": [ + 89, + 114, + 483, + 205 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 91, + 217, + 187, + 233 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Vedika Agarwal, Rakshith Shetty, and Mario Fritz. Towards causal vqa: Revealing and reducing spurious correlations by invariant and covariant semantic editing. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9690-9698, 2020. 1", + "[2] Aishwarya Agrawal, Dhruv Batra, and Devi Parikh. Analyzing the behavior of visual question answering models. arXiv preprint arXiv:1606.07356, 2016. 1", + "[3] Harsh Agrawal, Karan Desai, Yufei Wang, Xinlei Chen, Rishabh Jain, Mark Johnson, Dhruv Batra, Devi Parikh, Stefan Lee, and Peter Anderson. nocaps: novel object captioning at scale. In 2019 IEEE/CVF International Conference on Computer Vision (ICCV). IEEE, 2019. 7", + "[4] Jinze Bai, Shuai Bai, and et al. Qwen technical report. arXiv preprint arXiv:2309.16609, 2023. 2", + "[5] Jinze Bai, Shuai Bai, and et al. Qwen-vl: A frontier large vision-language model with versatile abilities. arXiv preprint arXiv:2308.12966, 2023. 2, 7", + "[6] Rohan Bavishi, Erich Elsen, and et al. Introducing our multimodal models, 2023. 2", + "[7] Ali Furkan Biten, Lluís Gómez, and Dimosthenis Karatzas. Let there be a clock on the beach: Reducing object hallucination in image captioning. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 1381-1390, 2022. 1", + "[8] Keqin Chen, Zhao Zhang, and et al. Shikra: Unleashing multimodal llm's referential dialogue magic. arXiv preprint arXiv:2306.15195, 2023. 
1", + "[9] Long Chen, Oleg Sinavski, Jan Hunermann, Alice Karnsund, Andrew James Willmott, Danny Birch, Daniel Maund, and Jamie Shotton. Driving with llms: Fusing object-level vector modality for explainable autonomous driving. arXiv preprint arXiv:2310.01957, 2023. 1", + "[10] Zhe Chen, Weiyun Wang, and et al. How far are we to gpt-4v? closing the gap to commercial multimodal models with open-source suites. arXiv preprint arXiv:2404.16821, 2024. 2", + "[11] Wei-Lin Chiang and Zhuohan et al Li. Vicuna: An opensource chatbot impressing gpt-4 with $90\\%$ chatgpt quality. See https://vicuna.lmsys.org (accessed 14 April 2023), 2023. 2", + "[12] Wenliang Dai and Junnan Li et al. Instructlip: Towards general-purpose vision-language models with instruction tuning, 2023. 2", + "[13] Chaoyou Fu, Peixian Chen, and et al. Mme: A comprehensive evaluation benchmark for multimodal large language models. arXiv preprint arXiv:2306.13394, 2023. 6" + ], + "bbox": [ + 93, + 243, + 483, + 900 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[14] Yash Goyal, Tejas Khot, Douglas Summers-Stay, Dhruv Bartra, and Devi Parikh. Making the v in vqa matter: Elevating the role of image understanding in visual question answering. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 6904-6913, 2017. 1", + "[15] Anisha Gunjal, Jihan Yin, and Erhan Bas. Detecting and preventing hallucinations in large vision language models. arXiv preprint arXiv:2308.06394, 2023. 1", + "[16] Vipul Gupta, Zhuowan Li, Adam Kortylewski, Chenyu Zhang, Yingwei Li, and Alan Yuille. Swapmix: Diagnosing and regularizing the over-reliance on visual context in visual question answering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5078-5088, 2022. 1", + "[17] Yudong Han, Liqiang Nie, Jianhua Yin, Jianlong Wu, and Yan Yan. Visual perturbation-aware collaborative learning for overcoming the language prior problem. arXiv preprint arXiv:2207.11850, 2022. 1", + "[18] Mingzhe Hu, Shaoyan Pan, Yuheng Li, and Xiaofeng Yang. Advancing medical imaging with language models: A journey from n-grams to chatgpt. arXiv preprint arXiv:2304.04920, 2023. 1", + "[19] Qidong Huang, Xiaoyi Dong, Pan Zhang, Bin Wang, Conghui He, Jiaqi Wang, Dahua Lin, Weiming Zhang, and Nenghai Yu. Opera: Alleviating hallucination in multimodal large language models via over-trust penalty and retrospection-allocation. In CVPR, pages 13418-13427, 2024. 2", + "[20] Drew A Hudson and Christopher D Manning. Gqa: A new dataset for real-world visual reasoning and compositional question answering. In CVPR, pages 6700-6709, 2019. 7", + "[21] Fushuo Huo, Wenchao Xu, Zhong Zhang, Haozhao Wang, Zhicheng Chen, and Peilin Zhao. Self-introspective decoding: Alleviating hallucinations for large vision-language models, 2024. 2", + "[22] Chaoya Jiang, Haiyang Xu, and et al. Hallucination augmented contrastive learning for multimodal large language model. In CVPR, pages 27036-27046, 2024. 2", + "[23] Sicong Leng, Hang Zhang, and et al. Mitigating object hallucinations in large vision-language models through visual contrastive decoding. In CVPR, pages 13872-13882, 2024. 2, 7", + "[24] Bo Li, Yuanhan Zhang, and et al. Mimic-it: Multi-modal in-context instruction tuning. arXiv preprint arXiv:2306.05425, 2023. 2", + "[25] Bo Li, Kaichen Zhang, and et al. Llava next: Stronger llms supercharge multimodal capabilities in the wild, 2024. 2", + "[26] Chunyuan Li, Cliff Wong, and et al. 
Llava-med: Training a large language-and-vision assistant for biomedicine in one day. In NeurIPS, pages 28541-28564, 2023. 2", + "[27] Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. BLIP-2: Bootstrapping language-image pre-training with frozen image encoders and large language models. In ICML, 2023. 1, 2", + "[28] Xiang Lisa Li, Ari Holtzman, and et al. Contrastive decoding: Open-ended text generation as optimization. arXiv preprint arXiv:2210.15097, 2022. 2" + ], + "bbox": [ + 516, + 92, + 905, + 900 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[29] Yifan Li, Yifan Du, Kun Zhou, Jinpeng Wang, Wayne Xin Zhao, and Ji-Rong Wen. Evaluating object hallucination in large vision-language models. arXiv preprint arXiv:2305.10355, 2023. 1", + "[30] Yifan Li, Yifan Du, Kun Zhou, Jinpeng Wang, Xin Zhao, and Ji-Rong Wen. Evaluating object hallucination in large vision-language models. In EMNLP, pages 292-305, 2023. 6", + "[31] Tsung-Yi Lin, Michael Maire, and et al. Microsoft coco: Common objects in context. In ECCV, pages 740-755, 2014. 7", + "[32] Fuxiao Liu, Kevin Lin, Linjie Li, Jianfeng Wang, Yaser Yacoob, and Lijuan Wang. Mitigating hallucination in large multi-modal models via robust instruction tuning. arXiv preprint arXiv:2306.14565, 2023. 1", + "[33] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. In NeurIPS, pages 34892-34916, 2023. 1, 2", + "[34] Haokun Liu, Yaonan Zhu, Kenji Kato, Izumi Kondo, Tadayoshi Aoyama, and Yasuhisa Hasegawa. Lm-based human-robot collaboration framework for manipulation tasks. arXiv preprint arXiv:2308.14972, 2023. 1", + "[35] Haotian Liu, Chunyuan Li, Yuheng Li, and Yong Jae Lee. Improved baselines with visual instruction tuning. In CVPR, pages 26296-26306, 2024. 1, 7", + "[36] Zhi-Song Liu, Robin Courant, and Vicky Kalogeiton. Funnynet-w: Multimodal learning of funny moments in videos in the wild. International Journal of Computer Vision, pages 1-22, 2024. 2", + "[37] Holy Lovenia, Wenliang Dai, Samuel Cahyawijaya, Ziwei Ji, and Pascale Fung. Negative object presence evaluation (nope) to measure object hallucination in vision-language models. arXiv preprint arXiv:2310.05338, 2023. 1", + "[38] Pan Lu, Swaroop Mishra, Tony Xia, Liang Qiu, Kai-Wei Chang, Song-Chun Zhu, Oyvind Tafjord, Peter Clark, and Ashwin Kalyan. Learn to explain: Multimodal reasoning via thought chains for science question answering. In The 36th Conference on Neural Information Processing Systems (NeurIPS), 2022. 3", + "[39] Jinjie Mai, Jun Chen, Bing Li, Guocheng Qian, Mohamed Elhoseiny, and Bernard Ghanem. Llm as a robotic brain: Unifying egocentric memory and control. arXiv preprint arXiv:2304.09349, 2023. 1", + "[40] AI Meta. Introducing meta llama 3: The most capable openly available llm to date. Meta AI, 2024. 2", + "[41] Yulei Niu, Kaihua Tang, Hanwang Zhang, Zhiwu Lu, Xian-Sheng Hua, and Ji-Rong Wen. Counterfactual vqa: A cause-effect look at language bias. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12700-12710, 2021. 1", + "[42] Dustin Schwenk, Apoorv Khandelwal, and et al. A-okvqa: A benchmark for visual question answering using world knowledge. In ECCV, pages 146–162, 2022. 7", + "[43] Rohan Taori, Ishaan Gulrajani, and et al. Stanford alpaca: an instruction-following llama model (2023). URL https://github.com/tatsu-lab/stanford_alpaca, 1(9), 2023. 
2" + ], + "bbox": [ + 91, + 90, + 480, + 901 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[44] Hugo Touvron, Thibaut Lavril, and et al. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971, 2023.", + "[45] Hugo Touvron, Louis Martin, and et al. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288, 2023. 2", + "[46] Sheng Wang, Zihao Zhao, Xi Ouyang, Qian Wang, and Dinggang Shen. Chatcad: Interactive computer-aided diagnosis on medical image using large language models. arXiv preprint arXiv:2302.07257, 2023. 1", + "[47] Xintong Wang, Jingheng Pan, and et al. Mitigating hallucinations in large vision-language models with instruction contrastive decoding. arXiv preprint arXiv:2403.18715, 2024.2, 7", + "[48] Yike Wu, Yu Zhao, Shiwan Zhao, Ying Zhang, Xiaojie Yuan, Guoqing Zhao, and Ning Jiang. Overcoming language priors in visual question answering via distinguishing superficially similar instances. arXiv preprint arXiv:2209.08529, 2022. 1", + "[49] Zhenyu Wu, Ziwei Wang, Xiuwei Xu, Jiwen Lu, and Haibin Yan. Embodied task planning with large language models. arXiv preprint arXiv:2307.01848, 2023. 1", + "[50] Hong Yan, Lijun Liu, Xupeng Feng, and Qingsong Huang. Overcoming language priors with self-contrastive learning for visual question answering. *Multimedia Tools and Applications*, 82(11):16343–16358, 2023. 1", + "[51] Qinghao Ye, Haiyang Xu, and et al. mplug-owl: Modularization empowers large language models with multimodality. arXiv preprint arXiv:2304.14178, 2023. 2", + "[52] Shilong Zhang, Peize Sun, and et al. Gpt4roi: Instruction tuning large language model on region-of-interest. arXiv preprint arXiv:2307.03601, 2023. 2", + "[53] Ren Zhibo, Wang Huizhen, Zhu Muhua, Wang Yichao, Xiao Tong, and Zhu Jingbo. Overcoming language priors with counterfactual inference for visual question answering. In Proceedings of the 22nd Chinese National Conference on Computational Linguistics, pages 600-610, 2023. 1", + "[54] Kaiyang Zhou, Jingkang Yang, Chen Change Loy, and Ziwei Liu. Learning to prompt for vision-language models. IJCV, 130(9):2337-2348, 2022. 1", + "[55] Deyao Zhu, Jun Chen, Xiaoqian Shen, Xiang Li, and Mohamed Elhoseiny. Minigpt-4: Enhancing vision-language understanding with advanced large language models. arXiv preprint arXiv:2304.10592, 2023. 1, 2" + ], + "bbox": [ + 514, + 90, + 905, + 700 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "ClearSight: Visual Signal Enhancement for Object Hallucination Mitigation in Multimodal Large Language Models", + "text_level": 1, + "bbox": [ + 102, + 85, + 893, + 130 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Supplementary Material", + "bbox": [ + 380, + 141, + 614, + 162 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "8. Additional Experimental Results", + "text_level": 1, + "bbox": [ + 89, + 178, + 390, + 195 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Sec. 8.1 presents the additional experimental results across all tasks in the MME benchmark. Sec. 8.2 details the experimental outcomes on the three datasets within the POPE benchmark. Sec. 8.3 compares the inference speeds and memory usage of various methods on ScienceQA and Nocaps. Sec. 8.4 highlights case studies of the VAF method on the LLaVA-Bench dataset.", + "bbox": [ + 89, + 200, + 483, + 306 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "8.1. 
Detailed Experimental Results on MME", + "text_level": 1, + "bbox": [ + 511, + 178, + 856, + 195 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Fig. 9 and Fig. 10 present the performance of the LLaVA model family on perception-related tasks within the MME benchmark. Models utilizing the VAF method demonstrate significantly better performance compared to those employing the VCD method. Notably, VAF achieves consistent leadership across all tasks with the LLaVA-v1.5-13B model, likely due to its ability to balance attention between", + "bbox": [ + 511, + 200, + 905, + 306 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/11b3663bf0024698189fe6f3112ff21dcb6c23465d60d7948074f56300a00e95.jpg", + "image_caption": [ + "Figure 9. Performance of LLaVA-v1.5-7B model on perception-related tasks in the MME Benchmark. VAF consistently achieved the highest scores across nearly all perception tasks." + ], + "image_footnote": [], + "bbox": [ + 158, + 333, + 805, + 569 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/451a93053e14534301efd50b796019652e06c5d881b0caef998aa921869931cd.jpg", + "image_caption": [ + "Figure 10. Performance of LLaVA-v1.5-13B model on perception-related tasks in the MME Benchmark. VAF consistently achieved the highest scores across nearly all perception tasks." + ], + "image_footnote": [], + "bbox": [ + 156, + 625, + 805, + 861 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/d68d5414ee93abce830c76d8bad3c0dd6824d54150d5ca53159dc02a3792d344.jpg", + "image_caption": [ + "Figure 11. Performance of the LLaVA-v1.5-7B model on cognition-related tasks in the MME Benchmark. The VAF method delivers a slight performance improvement compared to the degradation observed with the VCD method." + ], + "image_footnote": [], + "bbox": [ + 158, + 94, + 803, + 330 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/2888d89da363d0b60fede2e6cffd441ac8299d0e420dbeb7c1d6e1be961f06d9.jpg", + "image_caption": [ + "Figure 12. Performance of the LLaVA-v1.5-13B model on cognition-related tasks in the MME Benchmark. The VAF method delivers a slight performance improvement compared to the degradation observed with the VCD method." + ], + "image_footnote": [], + "bbox": [ + 156, + 385, + 805, + 623 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "visual and language modalities, ensuring generated content aligns more closely with visual inputs.", + "bbox": [ + 89, + 684, + 482, + 715 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Fig. 11 and Fig. 12 illustrate the performance of LLaVA model family on cognition-related tasks within the MME benchmark. The application of the VCD method significantly impaired the model's performance on these tasks, likely due to its disruptive effect on linguistic priors. In contrast, VAF method not only avoided such negative impacts but also resulted in a slight performance improvement. This improvement is attributed to VAF's ability to precisely resolve the model's tendency to overlook visual features during the critical fusion stage, facilitating better integration of visual information while preserving its effective use of linguistic information.", + "bbox": [ + 88, + 719, + 482, + 900 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "8.2. Detailed Experimental Results on POPE", + "text_level": 1, + "bbox": [ + 511, + 683, + 859, + 700 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Tab. 6 and Tab. 
9 summarize the experimental results of the LLaVA-v1.5 model family on the MSCOCO, A-OKVQA, and GQA datasets within the POPE benchmark. The results highlight that our approach consistently delivers more stable and significantly improved hallucination suppression compared to the VCD method. This advantage stems from our direct enhancement of attention to visual features during the modality fusion process, enabling balanced outputs across both visual and linguistic modalities. In contrast, the VCD method relies on suppressing language priors to indirectly enhance attention to visual information. All experiments employ greedy search as the decoding method.", + "bbox": [ + 511, + 719, + 906, + 902 + ], + "page_idx": 11 + }, + { + "type": "table", + "img_path": "images/733a7a722b6287fcc9fdac7f056498e59787768f80cf66a874f9f622f4187058.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
DatasetCategoryMethodAccuracyPrecisionRecallF1-score
MSCOCORandomRegular88.294.281.587.4
VCD88.594.481.887.6
VAF89.892.986.289.4
PopularRegular86.189.981.585.5
VCD86.390.081.785.8
VAF87.588.686.287.4
AdversarialRegular82.382.981.382.1
VCD82.382.981.682.4
VAF83.486.878.982.6
A-OKVQARandomRegular87.687.687.787.6
VCD87.787.887.687.8
VAF89.491.786.689.1
PopularRegular81.978.487.782.8
VCD82.178.587.983.1
VAF84.282.686.684.6
AdversarialRegular74.368.887.777.1
VCD72.468.087.476.7
VAF77.272.986.679.2
GQARandomRegular88.087.189.388.2
VCD88.687.489.588.8
VAF89.590.888.089.4
PopularRegular79.474.489.381.1
VCD79.974.689.581.7
VAF81.878.388.082.9
AdversarialRegular76.370.689.378.9
VCD75.270.289.978.3
VAF79.775.488.081.2
", + "bbox": [ + 212, + 87, + 795, + 583 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/703f7ad82282a916228916e145b95efd4e173a30ad5a5153fe76f0e5c7ac0718.jpg", + "table_caption": [ + "Table 6. Experimental results of LLaVA-1.5-7B model on POPE. VAF method achieves the most effective hallucination suppression across all three datasets. For emphasis, the highest scores in each setting are highlighted in red." + ], + "table_footnote": [], + "table_body": "
ModelMethodAccuracyTotal TimeGPU-MemoryLatency/Example
LLaVA-v1.5-7BRegular88.25:3214.5G0.111s
VCD88.510:3115.7G0.210s
VAF89.85:4814.5G0.116s
LLaVA-v1.5-13BRegular88.48:3926.7G0.173s
VCD88.619:3827.8G0.392s
VAF90.28:4526.7G0.175s
", + "bbox": [ + 176, + 637, + 826, + 771 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Table 7. A comparison of inference speed and GPU memory usage for different methods applied to the LLaVA-v1.5 model family on POPE benchmark. Results with the slowest inference speed and highest memory usage are highlighted in red.", + "bbox": [ + 89, + 782, + 906, + 811 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "8.3. Comparison of Inference Speeds", + "text_level": 1, + "bbox": [ + 89, + 837, + 375, + 854 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Tab. 7 and Tab. 8 assess the impact of various methods on the LLaVA-v1.5 model family, focusing on inference speed", + "bbox": [ + 89, + 869, + 483, + 902 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "and GPU memory usage. The results indicate that VCD significantly slows down inference, whereas our proposed method has a minimal effect. Furthermore, our method introduces no additional GPU memory requirements, in con", + "bbox": [ + 511, + 838, + 906, + 900 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "trast to VCD, which incurs substantial GPU memory overhead. This efficiency is achieved because our approach eliminates the need for extra processing of contrastive inputs, thereby significantly reducing computational over", + "bbox": [ + 89, + 90, + 485, + 154 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "head. All experiments were performed on a server equipped with a single A800 80G GPU, employing greedy search as the decoding strategy.", + "bbox": [ + 511, + 90, + 908, + 137 + ], + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/7a443757c9f12c09f98302669fb4ab28d8f5b4293a012d1a4cd3b4d9d4bd0d9f.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ModelMethodAccuracyTotal TimeGPU-MemoryLatency/Example
LLaVA-v1.5-7BRegular68.00:36:3914.5G0.488s
VCD64.51:18:4715.7G1.058s
VAF68.50:36:4114.5G0.489s
LLaVA-v1.5-13BRegular71.60:45:2026.7G0.604s
VCD70.01:46:5927.8G1.426s
VAF71.70:48:2426.7G0.645s
", + "bbox": [ + 176, + 172, + 828, + 309 + ], + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/b57391157530332b83eb40b4f74b2031f218abfbe584bca644d6cd2c3c90407f.jpg", + "table_caption": [ + "Table 8. A comparison of inference speed and GPU memory usage for different methods applied to the LLaVA-v1.5 model family on Nocaps benchmark. Results with the slowest inference speed and highest memory usage are highlighted in red." + ], + "table_footnote": [], + "table_body": "
DatasetCategoryMethodAccuracyPrecisionRecallF1-score
MSCOCORandomRegular88.494.681.687.6
VCD88.695.081.887.7
VAF90.294.285.689.7
PopularRegular86.991.381.686.2
VCD87.091.482.086.4
VAF88.490.685.688.0
AdversarialRegular83.484.981.483.1
VCD83.785.181.783.1
VAF84.583.885.584.7
A-OKVQARandomRegular88.088.887.187.9
VCD88.289.287.587.9
VAF89.491.486.889.1
PopularRegular83.981.787.184.3
VCD84.281.787.384.3
VAF86.085.486.886.1
AdversarialRegular76.071.087.178.2
VCD76.471.287.178.3
VAF78.274.186.879.9
GQARandomRegular88.387.889.088.4
VCD88.388.189.388.5
VAF89.787.892.289.9
PopularRegular83.379.889.084.1
VCD83.280.089.284.1
VAF85.283.088.685.7
AdversarialRegular78.573.389.080.4
VCD78.773.388.980.3
VAF80.876.688.682.1
", + "bbox": [ + 210, + 361, + 795, + 858 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Table 9. Experimental results of LLaVA-1.5-13B model on POPE. VAF method achieves the most effective hallucination suppression across all three datasets. For emphasis, the highest scores in each setting are highlighted in red.", + "bbox": [ + 89, + 867, + 906, + 897 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/a65c83187bf6f743d3ed6f698d34b951a870192e87a0223814be4491c320a2c8.jpg", + "image_caption": [ + "Prompt: How many uncut fruits are in the image?", + "Vanilla Decoding: There are five uncut fruits in the image.", + "Visual Contrastive Decoding: There are four uncut fruits in the image." + ], + "image_footnote": [], + "bbox": [ + 187, + 102, + 325, + 202 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/1a904075e7cad436ffd0ce8802753c74ac8c0ab0e5717e2ca6c9dfb9b77aa3ea.jpg", + "image_caption": [ + "Prompt: How many coffee mugs are in the set?", + "Vanilla Decoding: There are four coffee mugs in the set.", + "Visual Contrastive Decoding: There are three coffee mugs in the set.", + "Visual Amplification Fusion: There are three coffee mugs in the set." + ], + "image_footnote": [], + "bbox": [ + 521, + 102, + 658, + 200 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/d96465a44578b92b95d86a245007c48c89608d96e2b641f2c2af97ae5787d3c4.jpg", + "image_caption": [ + "Visual Amplification Fusion: There are three uncut fruits in the image.", + "Figure 13. An illustration of VAF correcting hallucinations on LLaVA-Bench, with a focus on numerical perception tasks. Correct outputs are highlighted in green, while incorrect ones are marked in red.", + "Figure 14. An illustration of VAF correcting hallucinations on LLaVA-Bench, with a focus on complex reasoning tasks. Correct outputs are highlighted in green, while incorrect ones are marked in red." + ], + "image_footnote": [], + "bbox": [ + 187, + 460, + 372, + 595 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Prompt: What is unusual about this image?", + "text_level": 1, + "bbox": [ + 413, + 467, + 630, + 479 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Vanilla Decoding: The unusual aspect of this image is that a man is ironing clothes while standing on the back of a moving yellow taxi. This is not a typical scenario, as ironing clothes is usually done indoors, in a more controlled environment. The man's actions pose a risk to both himself and the passengers of the taxi, as he could lose his balance and fall, causing injury or damage to the vehicle. Additionally, the presence of a washing machine in the back of the taxi is also unconventional, as it is not a common feature in taxis.", + "bbox": [ + 419, + 503, + 767, + 592 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Visual Contrastive Decoding: An unconventional aspect of the image is the presence of a man standing on the back of a yellow car while holding multiple clothes, seemingly ironing them. Ironing is typically done in a more controlled and stationary environment, such as a home or an ironing board in an indoor setting. However, in this case, the man is in a busy city driving a yellow taxi, which defies the traditional approach to ironing and creates an unusual scene. 
It may draw the attention of onlookers and passersby due to the uncommon behavior, making this image memorable.", + "bbox": [ + 419, + 614, + 767, + 715 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Visual Amplification Fusion: The unusual aspect of this image is that a man is ironing clothes while sitting on the back of a moving yellow taxi. This is not a typical scenario, as ironing clothes is generally done in a more controlled environment, such as a home or a laundromat. The man's position on the back of the taxi, combined with the act of ironing, creates an unconventional and unexpected scene.", + "bbox": [ + 419, + 739, + 767, + 806 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/5e53b49c44c6e0501a0e4aa3ecd37b74a84fb37345c7e5c3fb0250cb93d4c76f.jpg", + "image_caption": [ + "Prompt: Describe this photo in detail." + ], + "image_footnote": [], + "bbox": [ + 225, + 99, + 392, + 220 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Vanilla Decoding: The image features a painting of a dog wearing a Renaissance-style dress, giving it a human-like appearance. The dog is sitting in a chair, looking directly at the viewer. The painting is quite detailed, capturing the dog's facial expression and the intricate design of the dress. In the background, there is a person partially visible on the left side of the painting. The overall scene is quite captivating and unique, as it combines the artistic style of the Renaissance with the subject matter of a dog.", + "bbox": [ + 428, + 141, + 738, + 220 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Visual Contrastive Decoding: The image features a unique painting on a canvas, depicting a portrait of a bulldog in Renaissance style, giving it a Renaissance era appearance. In this painting, the bulldog is presented as a woman, complete with a veil on its head, looking directly to its right.", + "bbox": [ + 429, + 243, + 738, + 292 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "The painting also includes several elements of landscape and architecture, giving a sense of a real setting. Apart from the central dog figure, there are three smaller figures or figures in the background, adding more depth and life to the painting. In the forefront, a chair is visible, positioned to the left of the dog, further enhancing the painting's depiction of a realistic scene.", + "bbox": [ + 429, + 292, + 738, + 352 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Visual Amplification Fusion: The image features a painting of a dog wearing a Renaissance-style dress, giving it a human-like appearance. The dog is positioned in the center of the painting, and appears to be staring at the viewer.", + "bbox": [ + 428, + 375, + 736, + 414 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/88514ab10ec1b0add4b169678541b00295fc37deda1c2ba83f677b1b7cc5202d.jpg", + "image_caption": [ + "Figure 15. An illustration of VAF correcting hallucinations on LLaVA-Bench, with a focus on image description tasks. Correct outputs are highlighted in green, while incorrect ones are marked in red.", + "Prompt: Describe this photo in detail.", + "Figure 16. Additional example of VAF correcting model hallucinations on LLaVA-Bench, primarily focusing on image description tasks. Correct outputs are highlighted in green, while incorrect ones are marked in red." 
+ ], + "image_footnote": [], + "bbox": [ + 225, + 496, + 390, + 614 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Vanilla Decoding: The image features a beautiful display of three ripe and juicy passion fruit, cut in half to reveal their exquisite white meat inside. The ripe fruit is placed in a wooden bowl, making them the focal point of the image. Each fruit half rests in a different location on the table, showcasing the various sizes of these unique fruit-like berries. The close-up view of the cut open fruits and the table setting creates an appetizing scene, highlighting the natural color of the passion fruit and its appeal to anyone who enjoys a fresh and tangy experience with a hint of exotic taste.", + "bbox": [ + 429, + 536, + 740, + 625 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Visual Contrastive Decoding: The image features a wooden table with a basket of fruit, including a variety of fruits such as apples, oranges, and bananas. There are three apples on the table, with one placed near the left side, another in the middle, and the third one on the right side. In addition to the apples, there are two oranges on the table, one located in the middle and the other towards the right side. A banana can be seen on the left side of the table, and a cluster of grapes is also present, with some grapes placed near the center and others on the right side of the table. The assortment of fruits creates a colorful and appetizing display.", + "bbox": [ + 431, + 646, + 740, + 744 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Visual Amplification Fusion: The image features a wooden table with a basket of four purple fruits, possibly eggplants or figs, placed on it. The fruits are cut open, revealing their inwards. The fruits are arranged in a way that they are sitting on top of each other, with one fruit on the left side, another in the middle, and the third on the right side of the table. The fourth fruit is placed at the edge of the table.", + "bbox": [ + 433, + 763, + 740, + 823 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/186b4777820d03711bd9a905a5233c8f91c9c5ca8d2c8ed43bec3792005c511b.jpg", + "image_caption": [ + "(a) Accuracy Metric" + ], + "image_footnote": [], + "bbox": [ + 96, + 90, + 472, + 309 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/792b6994a955d9a584b4ffe2d499b18844b6c1008aa9d461b3041cfa70c08cb0.jpg", + "image_caption": [ + "(b) F1-Score Metirc" + ], + "image_footnote": [], + "bbox": [ + 527, + 90, + 897, + 309 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "8.4. Case study on LLaVA-Bench", + "text_level": 1, + "bbox": [ + 89, + 410, + 349, + 426 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Fig. 13, Fig. 14, Fig. 15, and Fig. 16 illustrate the effectiveness of various methods in mitigating model hallucinations on LLaVA-Bench. Across tasks such as numerical perception, image description, and complex reasoning, our approach demonstrates consistently superior performance in suppressing hallucinations. Experiments are conducted using LLaVA-v1.5-7B model.", + "bbox": [ + 89, + 431, + 483, + 537 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "9. Additional Ablation Studies", + "text_level": 1, + "bbox": [ + 89, + 553, + 349, + 569 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "In Sec. 9.1, we examine how enhancing attention to visual features at different levels affects hallucination suppression. In Sec. 
9.2, we analyze the influence of varying the suppression coefficient $\beta$ on mitigating hallucinations. Finally, in Sec. 9.3, we evaluate the performance of the VAF method in suppressing hallucinations under various sampling strategies.", + "bbox": [ + 89, + 579, + 483, + 686 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "9.1. Effect of Enhancement at Different Layers", + "text_level": 1, + "bbox": [ + 89, + 696, + 452, + 713 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "We enhanced attention to visual features in layers 0-5, 10-15, and 20-25. Fig. 17 demonstrates the impact of enhancing visual attention at different layers. Notably, enhancing attention in the middle layers significantly reduces hallucination, while modifications in the shallow and deep layers have minimal effect on the generation results. As discussed in Sec. 4.1, this is because the model primarily integrates modality information in the middle layers. Thus, enhancing the focus on visual features during this phase is crucial for effectively mitigating hallucination. Experiments are conducted using the LLaVA-v1.5-7B model on the COCO-Random dataset from the POPE Benchmark.", + "bbox": [ + 89, + 719, + 483, + 900 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/ffcc6e9e895eff01f780f8bd05da7688628711fa34ba7e6cfa9ed07eea6550d6.jpg", + "image_caption": [ + "Figure 17. The Effect of Enhancing Visual Attention at Different Layers on Prediction Accuracy. This experiment, conducted with the LLaVA-v1.5-7B model on the COCO-Random dataset within the POPE Benchmark, demonstrates that enhancing attention to visual features in the model's middle layers significantly reduces hallucinations.", + "Figure 18. The effect of the suppression coefficient $\\beta$ on the VAF method's ability to mitigate model hallucinations. The experiments were performed using the LLaVA-v1.5-7B model on the COCO-Random dataset from the POPE Benchmark." + ], + "image_footnote": [], + "bbox": [ + 542, + 412, + 879, + 612 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "9.2. Effect of Suppression Coefficient", + "text_level": 1, + "bbox": [ + 511, + 710, + 802, + 727 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "We assessed the effect of the suppression coefficient $\beta$ on the performance of the VAF method using the LLaVA-v1.5-7B model on the COCO-Random dataset within the POPE Benchmark. In our experiments, $\alpha$ was fixed at 0.15, while $\beta$ was systematically adjusted. The results, presented in Fig. 18, reveal that when $0 < \beta < 0.15$ , VAF significantly enhances the model's ability to suppress hallucinations. This improvement is likely due to VAF reducing redundant attention to system prompts in this range, thereby reinforcing focus on visual features and enabling generated content to better align with the visual input. Conversely,", + "bbox": [ + 511, + 734, + 906, + 902 + ], + "page_idx": 16 + }, + { + "type": "table", + "img_path": "images/0b87569274e178e6f9394d30e1099b4c2100d12bbd967c3a449cc44082e27c39.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Sampling StrategyMethodAccuracyPrecisionRecallF1-Score
GreedyRegular88.294.481.487.4
VAF89.892.986.289.4
Direct SamplingRegular82.990.471.380.9
VAF83.990.680.985
Top PRegular84.392.172.582.1
VAF85.789.682.485.9
Top KRegular83.391.972.881.1
VAF8588.381.984.9
Top K + Temp0.5Regular85.595.174.984.5
VAF86.791.283.487
Top K + Temp1.5Regular80.487.170.277.8
VAF82.18678.281.9
", + "bbox": [ + 233, + 88, + 772, + 340 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Table 10. Effectiveness of the VAF method in mitigating model hallucination under different sampling strategies. The highest score in each setting is highlighted in red. Experiments were conducted using the LLaVA-v1.5-7B model on the COCO-Random dataset within the POPE Benchmark.", + "bbox": [ + 89, + 351, + 906, + 393 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "when $\\beta > 0.15$ , the model's performance deteriorated. We hypothesize that this decline stems from excessive suppression of attention to system prompts, which disrupts the delicate balance required for effectively integrating multimodal information, ultimately leading to a degradation in overall performance.", + "bbox": [ + 89, + 419, + 483, + 511 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "9.3. Effect of Different Sampling Strategies", + "text_level": 1, + "bbox": [ + 89, + 522, + 424, + 540 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "We evaluated the effectiveness of the VAF method in mitigating model hallucination under different sampling strategies using the LLaVA-v1.5-7B model on the COCO-Random dataset from the POPE Benchmark. The experimental results, shown in Tab. 10, indicate that the VAF method significantly mitigates model hallucination across all sampling strategies.", + "bbox": [ + 89, + 546, + 483, + 652 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "10. Prompts for Different Tasks", + "text_level": 1, + "bbox": [ + 91, + 667, + 359, + 686 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "POPE Dataset. In the POPE dataset, input template for the model is presented below, with the prompts highlighted in green and the image highlighted in red.", + "bbox": [ + 89, + 694, + 483, + 742 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.", + "bbox": [ + 114, + 767, + 457, + 815 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "USER: IMAGE", + "text_level": 1, + "bbox": [ + 116, + 820, + 235, + 834 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Is there a cow in the image? Please just answer yes or no.", + "bbox": [ + 174, + 835, + 455, + 866 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "ASSISTANT:", + "text_level": 1, + "bbox": [ + 117, + 872, + 215, + 886 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Nocaps Datasets. In Nocaps and Flickr30k dataset, input template for the model is presented below, with prompts highlighted in green and image highlighted in red.", + "bbox": [ + 511, + 419, + 906, + 465 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.", + "bbox": [ + 537, + 489, + 880, + 536 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "USER: IMAGE", + "text_level": 1, + "bbox": [ + 539, + 542, + 658, + 556 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Provide a one-sentence caption for the provided image.", + "bbox": [ + 596, + 558, + 879, + 588 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "ASSISTANT:", + "text_level": 1, + "bbox": [ + 540, + 595, + 637, + 609 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Sci-VQA Dataset. 
In the Sci-VQA dataset, input template for the model is presented below, with the prompts highlighted in green and the image highlighted in red.", + "bbox": [ + 511, + 636, + 906, + 683 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.", + "bbox": [ + 537, + 707, + 880, + 753 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "USER: IMAGE", + "text_level": 1, + "bbox": [ + 540, + 760, + 658, + 773 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Context: Select the best answer.", + "bbox": [ + 596, + 775, + 821, + 787 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Which property do these three objects have in common?", + "bbox": [ + 596, + 790, + 879, + 818 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "A. shiny B. slippery C. opaque", + "bbox": [ + 596, + 820, + 802, + 835 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Answer with the option's letter from the given choices directly.", + "bbox": [ + 596, + 835, + 879, + 864 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "ASSISTANT:", + "text_level": 1, + "bbox": [ + 540, + 872, + 637, + 886 + ], + "page_idx": 17 + } +] \ No newline at end of file diff --git a/data/2025/2503_13xxx/2503.13107/91b69939-46cd-4d75-af9c-6d9f6dccd58b_model.json b/data/2025/2503_13xxx/2503.13107/91b69939-46cd-4d75-af9c-6d9f6dccd58b_model.json new file mode 100644 index 0000000000000000000000000000000000000000..4adfefacafd3185f4f52360843f1d8ed293627c9 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13107/91b69939-46cd-4d75-af9c-6d9f6dccd58b_model.json @@ -0,0 +1,3481 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.256, + 0.058, + 0.706 + ], + "angle": 270, + "content": "arXiv:2503.13107v2 [cs.CV] 27 May 2025" + }, + { + "type": "title", + "bbox": [ + 0.104, + 0.13, + 0.895, + 0.176 + ], + "angle": 0, + "content": "ClearSight: Visual Signal Enhancement for Object Hallucination Mitigation in Multimodal Large Language Models" + }, + { + "type": "text", + "bbox": [ + 0.339, + 0.204, + 0.664, + 0.222 + ], + "angle": 0, + "content": "Hao Yin Gunagzong Si Zilei Wang*" + }, + { + "type": "text", + "bbox": [ + 0.312, + 0.223, + 0.686, + 0.24 + ], + "angle": 0, + "content": "University of Science and Technology of China" + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.242, + 0.76, + 0.256 + ], + "angle": 0, + "content": "{yinhnavi, guangzongsi}@mail.ustc.edu.cn, zlwang@ustc.edu.cn" + }, + { + "type": "title", + "bbox": [ + 0.248, + 0.292, + 0.326, + 0.307 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.323, + 0.485, + 0.687 + ], + "angle": 0, + "content": "Contrastive decoding strategies are widely used to mitigate object hallucinations in multimodal large language models (MLLMs). By reducing over-reliance on language priors, these strategies ensure that generated content remains closely grounded in visual inputs, producing contextually accurate outputs. Since contrastive decoding requires no additional training or external tools, it offers both computational efficiency and versatility, making it highly attractive. However, these methods present two main limitations: (1) bluntly suppressing language priors can compromise coherence and accuracy of generated content, and (2) processing contrastive inputs adds computational load, significantly slowing inference speed. 
To address these challenges, we propose Visual Amplification Fusion (VAF), a plug-and-play technique that enhances attention to visual signals within the model's middle layers, where modality fusion predominantly occurs. This approach enables more effective capture of visual features, reducing the model's bias toward language modality. Experimental results demonstrate that VAF significantly reduces hallucinations across various MLLMs without affecting inference speed, while maintaining coherence and accuracy in generated outputs. The code is available at https://github.com/ustc-hyin/ClearSight." + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.716, + 0.222, + 0.731 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.741, + 0.483, + 0.877 + ], + "angle": 0, + "content": "In recent years, MLLMs [8, 27, 33, 35, 54, 55] have achieved remarkable progress in the intersecting fields of computer vision and natural language processing, and have been widely applied in tasks such as image captioning and visual question answering. However, these models often encounter the issue of \"object hallucination\" [15, 29, 32, 37] in practical applications, where the generated textual descriptions do not match the actual objects in the image. This problem highlights an over-reliance on unimodal pri" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.293, + 0.905, + 0.354 + ], + "angle": 0, + "content": "ors (especially language priors) [17, 48, 50, 53] during inference, posing potential risks in high-precision applications such as medical diagnosis [18, 46] and autonomous driving [9, 34, 39, 49]." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.355, + 0.907, + 0.522 + ], + "angle": 0, + "content": "To address object hallucination [1, 2, 7, 14], several Contrastive Decoding strategies have been introduced in recent years. Among these, the Visual Contrastive Decoding (VCD) method has shown promise in reducing hallucinations by contrasting output distributions from both original and perturbed visual inputs, thus mitigating the model's excessive reliance on language priors [16, 41]. Notably, contrastive decoding methods do not require additional training or external tools, offering both computational efficiency and versatility, which has garnered them significant attention. However, these methods present two main limitations:" + }, + { + "type": "title", + "bbox": [ + 0.541, + 0.535, + 0.783, + 0.55 + ], + "angle": 0, + "content": "Limitations of Contrastive Decoding" + }, + { + "type": "text", + "bbox": [ + 0.54, + 0.562, + 0.88, + 0.607 + ], + "angle": 0, + "content": "- While reducing over-reliance on language priors, these methods may compromise the coherence and accuracy of generated content." + }, + { + "type": "text", + "bbox": [ + 0.54, + 0.608, + 0.881, + 0.653 + ], + "angle": 0, + "content": "- Contrastive decoding necessitates separate processing of the original and contrastive inputs, which considerably increases inference time." + }, + { + "type": "list", + "bbox": [ + 0.54, + 0.562, + 0.881, + 0.653 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.673, + 0.907, + 0.824 + ], + "angle": 0, + "content": "To address these shortcomings, we hope to propose a training-free method that can effectively reduces hallucinations without compromising content quality or inference speed. 
Our saliency analysis of the model's attention maps reveals that biases toward language in generated content do not arise from an overemphasis on language signals but rather from insufficient attention on visual information during modality fusion. Based on this insight, we introduce a novel, plug-and-play technique to mitigate hallucinations: Visual Amplification Fusion (VAF)." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.825, + 0.909, + 0.903 + ], + "angle": 0, + "content": "Our analysis indicates that modality fusion in MLLMs primarily occurs within the middle layers. VAF specifically amplifies visual signals at these middle layers, enabling the model to capture more distinctive visual features during fusion, which in turn reduces false descriptions in generated" + }, + { + "type": "page_footnote", + "bbox": [ + 0.11, + 0.888, + 0.238, + 0.9 + ], + "angle": 0, + "content": "*Corresponding Author" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.09, + 0.092, + 0.483, + 0.168 + ], + "angle": 0, + "content": "text. This technique not only strengthens the model's visual representations but also retains the beneficial influence of language priors, thus preserving content quality. Furthermore, by eliminating the need to process contrastive samples, VAF maintains inference speed." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.168, + 0.483, + 0.304 + ], + "angle": 0, + "content": "Experimental results validate the effectiveness of the VAF method. Across multiple object hallucination benchmarks, VAF demonstrated notable performance gains, with improvements of approximately \\(3\\%\\) on POPE and \\(7\\%\\) on MME. In terms of coherence and accuracy of generated responses, VCD caused a roughly \\(19\\%\\) decrease on NoCaps, while VAF maintained content quality without negative impacts. Additionally, VCD reduced inference speed by \\(50\\%\\), whereas VAF had virtually no effect on inference speed." + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.305, + 0.446, + 0.319 + ], + "angle": 0, + "content": "In summary, the main contributions are as follows:" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.32, + 0.483, + 0.365 + ], + "angle": 0, + "content": "- We identify the negative impacts of contrastive decoding methods on both the quality of generated content and model inference speed." + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.366, + 0.483, + 0.409 + ], + "angle": 0, + "content": "- We analyze the modality fusion mechanism in MLLMs, highlighting its insufficient attention to visual information." + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.41, + 0.483, + 0.456 + ], + "angle": 0, + "content": "- We introduce the VAF method, which effectively mitigates the object hallucination problem while maintaining inference speed, coherence, and accuracy." + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.457, + 0.483, + 0.501 + ], + "angle": 0, + "content": "- We demonstrate the significant performance improvements of the VAF method across multiple object hallucination benchmarks." + }, + { + "type": "list", + "bbox": [ + 0.091, + 0.32, + 0.483, + 0.501 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.517, + 0.228, + 0.533 + ], + "angle": 0, + "content": "2. Related work" + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.542, + 0.41, + 0.559 + ], + "angle": 0, + "content": "2.1. 
Multimodal Large Language Models" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.565, + 0.483, + 0.792 + ], + "angle": 0, + "content": "The development of MLLMs [26, 36, 51, 52] has advanced from BERT-based decoders to LLM-based architectures [4, 11, 40, 43-45], enabling improved multimodal relationship capture [6, 10, 24, 25]. Models like BLIP-2 [27] and miniGPT-4 [55] incorporate a Q-Former mechanism, which enhances the alignment between visual and textual inputs, allowing for more precise interactions across modalities. InstructBLIP [12] builds on this approach by adding task-specific instructions, which improve the model's understanding of context-sensitive visual semantics. LLaVA [33] and Qwen-VL [5] utilize simpler linear projection techniques that streamline the alignment process, resulting in improved overall performance on vision-language tasks. However, hallucination issues persist across MLLMs, posing a significant challenge that requires further research." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.803, + 0.373, + 0.82 + ], + "angle": 0, + "content": "2.2. Contrastive Decoding Strategies" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.825, + 0.483, + 0.902 + ], + "angle": 0, + "content": "In recent years, Contrastive Decoding [19, 21, 22, 28] has emerged as a technique to improve generative model accuracy through contrastive judgment, widely employed to address hallucinations in generated content. For instance, Visual Contrastive Decoding (VCD) [23] contrasts output" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.092, + 0.907, + 0.319 + ], + "angle": 0, + "content": "distributions derived from original and distorted visual inputs, effectively reducing the over-reliance on statistical bias and unimodal priors, two essential causes of object hallucinations. Similarly, Instruction Contrastive Decoding (ICD) [47] works by comparing distributions derived from standard and disrupted instructions, thereby removing hallucinated concepts from the original distribution. These contrastive methods help ground generated content closely to visual inputs, resulting in contextually accurate outputs. However, despite these advancements, contrastive decoding faces two primary limitations: slower inference speed and reduced coherence in generated content. To overcome these limitations, we propose the VAF method, which achieves effective hallucination reduction while preserving both inference speed and content coherence." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.33, + 0.773, + 0.347 + ], + "angle": 0, + "content": "3. Preliminary and Motivation" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.356, + 0.907, + 0.447 + ], + "angle": 0, + "content": "In Sec. 3.1, we illustrate the working mechanism of contrastive decoding to mitigate hallucinations, using Visual Contrastive Decoding as an example. In Sec. 3.2, we analysis two main drawbacks of this approach: its potential to disrupt the coherence and accuracy of generated content, and its tendency to slow down model inference." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.454, + 0.719, + 0.47 + ], + "angle": 0, + "content": "3.1. Contrastive Decoding" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.476, + 0.906, + 0.582 + ], + "angle": 0, + "content": "We consider a MLLM parametrized by \\(\\theta\\). 
The model takes as input a textual query \\(x\\) and a visual input \\(v\\), where \\(v\\) provides contextual visual information to assist the model in generating a relevant response \\(y\\) to the textual query. The response \\(y\\) is sampled auto-regressively from the probability distribution conditioned on the query \\(x\\) and the visual context \\(v\\). Mathematically, this can be formulated as:" + }, + { + "type": "equation", + "bbox": [ + 0.605, + 0.59, + 0.905, + 0.626 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} y _ {t} \\sim p _ {\\theta} \\left(y _ {t} \\mid v, x, y _ {< t}\\right) \\tag {1} \\\\ \\propto \\exp \\operatorname {l o g i t} _ {\\theta} \\left(y _ {t} \\mid v, x, y _ {< t}\\right) \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.633, + 0.906, + 0.663 + ], + "angle": 0, + "content": "where \\(y_{t}\\) denotes the token at time step \\(t\\), and \\(y_{< t}\\) represents the sequence of generated tokens up to the time step \\((t - 1)\\)." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.664, + 0.907, + 0.845 + ], + "angle": 0, + "content": "To mitigate the issue of object hallucination in MLLMs, contrastive decoding techniques can be applied. Here, we present Visual Contrastive Decoding (VCD) as a representative approach, shown in Fig. 1. Specifically, given a textual query \\( x \\) and a visual input \\( v \\), the model generates two distinct output distributions: one conditioned on the original \\( v \\) and the other on the distorted visual input \\( v' \\), which is derived by applying pre-defined distortions (i.e., Gaussian noise mask) to \\( v \\). Then, a new contrastive probability distribution is computed by exploiting the differences between the two initially obtained distributions. The new contrastive distribution \\( p_{vcd} \\) is formulated as:" + }, + { + "type": "equation", + "bbox": [ + 0.536, + 0.851, + 0.905, + 0.905 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} p _ {v c d} (y \\mid v, v ^ {\\prime}, x) = \\text {s o f t m a x} \\left[ \\operatorname {l o g i t} _ {\\theta} (y \\mid v, x) + \\right. \\tag {2} \\\\ \\left. \\alpha \\cdot \\left(\\operatorname {l o g i t} _ {\\theta} (y \\mid v, x) - \\operatorname {l o g i t} _ {\\theta} (y \\mid v ^ {\\prime}, x)\\right) \\right], \\\\ \\end{array}\n\\]" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.09, + 0.092, + 0.485, + 0.168 + ], + "angle": 0, + "content": "where larger \\(\\alpha\\) values indicate a stronger amplification of differences between the two distributions (\\(\\alpha = 0\\) reduces to regular decoding). Essentially, VCD serves as a corrective mechanism, reducing hallucinations by contrasting against a distribution predisposed to favoring them." + }, + { + "type": "image", + "bbox": [ + 0.097, + 0.18, + 0.48, + 0.325 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.334, + 0.485, + 0.418 + ], + "angle": 0, + "content": "Figure 1. Illustration of Visual Contrastive Decoding. The hallucinated object \"Teacher\" is suppressed by contrasting with an output distribution prone to hallucinations. This method has two main drawbacks: (1) additional processing of distorted visual inputs greatly increases inference time; (2) subtracting the language prior disrupts content coherence." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.444, + 0.407, + 0.46 + ], + "angle": 0, + "content": "3.2. 
Limitations of Contrastive Decoding" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.467, + 0.484, + 0.557 + ], + "angle": 0, + "content": "As contrastive decoding methods do not require training or external tools, they offer high computational efficiency and generalizability, attracting significant attention in academia. However, these methods still have two major drawbacks: a reduction in the quality of generated content and slower inference speed." + }, + { + "type": "image", + "bbox": [ + 0.122, + 0.572, + 0.445, + 0.742 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.753, + 0.483, + 0.809 + ], + "angle": 0, + "content": "Figure 2. Impact of VCD on Model Performance. CIDEr scores are reported on the Nocaps benchmark, while Accuracy is presented for the ScienceQA benchmark. The use of VCD leads to a significant decline in model performance." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.825, + 0.484, + 0.903 + ], + "angle": 0, + "content": "While contrasting logits of \\( p_{\\theta}(y \\mid v, x) \\) and \\( p_{\\theta}(y \\mid v', x) \\) can help reduce over-reliance on language priors and mitigate hallucination in MLLMs-as evidenced by a \\( 4\\% \\) performance gain on the POPE benchmark using the VCD method-merely decreasing the influence of the language" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.092, + 0.905, + 0.212 + ], + "angle": 0, + "content": "modality on the output distribution may undermine the coherence of the generated content, potentially leading to prediction errors. This issue is less pronounced in straightforward object hallucination tasks, where responses are limited to binary options, such as \"yes\" or \"no\". However, in more complex tasks, including multiple-choice question answering and image caption, the impact of contrastive learning methods on content quality becomes more significant." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.212, + 0.907, + 0.349 + ], + "angle": 0, + "content": "To verify this, we applied VCD method to LLaVA-v1.5-7B and LLaVA-v1.5-13B models, assessing their performance on the ScienceQA [38] and NoCaps benchmarks. As illustrated in Fig. 2, our findings reveal that, following the application of VCD, model performance decreased by \\(5\\%\\) on ScienceQA and by a considerable \\(45\\%\\) on NoCaps. These results suggest that in tasks requiring nuanced natural language generation, contrastive decoding methods can substantially impair content quality." + }, + { + "type": "table", + "bbox": [ + 0.53, + 0.359, + 0.892, + 0.466 + ], + "angle": 0, + "content": "
Model | Method | ScienceQA | Nocaps
LLaVA-v1.5-7B | Regular | 0.141s | 0.456s
LLaVA-v1.5-7B | VCD | 0.293s | 1.086s
LLaVA-v1.5-13B | Regular | 0.222s | 0.602s
LLaVA-v1.5-13B | VCD | 0.459s | 1.372s
" + }, + { + "type": "table_caption", + "bbox": [ + 0.513, + 0.475, + 0.907, + 0.532 + ], + "angle": 0, + "content": "Table 1. Impact of VCD on Model Inference Speed. The table shows the average inference time per sample (in seconds) on the ScienceQA and Nocaps benchmarks. Applying the VCD method nearly doubled the inference time of the model." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.547, + 0.906, + 0.698 + ], + "angle": 0, + "content": "Contrastive decoding methods notably reduce inference speed because they require calculating the output distribution for additional contrastive samples. For instance, in VCD method, each visual input \\( v \\) necessitates computing the logits of both \\( p_{\\theta}(y \\mid v, x) \\) and \\( p_{\\theta}(y \\mid v', x) \\) separately. This doubles the computation load during inference compared to vanilla decoding. We evaluated the inference speed of VCD versus vanilla decoding on ScienceQA. The experimental results, shown in Tab. 1, reveal that VCD's inference time is almost double that of vanilla decoding." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.71, + 0.803, + 0.727 + ], + "angle": 0, + "content": "4. Visual Neglect in Modal Fusion" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.735, + 0.907, + 0.903 + ], + "angle": 0, + "content": "The primary objective of this section is to examine why MLLMs tend to rely excessively on language priors in their predictions. In Sec. 4.1, saliency analysis reveals that image tokens influence prediction outcomes mainly through interactions with instruction tokens within the middle layers. Sec. 4.2 then compares attention weights across different modalities, showing that the attention given to visual features is notably lower than that allocated to system prompts and user instructions. These findings indicate that visual information is often underutilized in the modality fusion process, resulting in an over-reliance on language priors." + } + ], + [ + { + "type": "title", + "bbox": [ + 0.091, + 0.091, + 0.405, + 0.108 + ], + "angle": 0, + "content": "4.1. Mid-layer: Visual-Language Fusion" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.114, + 0.483, + 0.204 + ], + "angle": 0, + "content": "To uncover why MLLMs tend to overly rely on language priors and overlook visual content in prediction, it is necessary first to clarify how the model utilizes visual modality information. This section explores the influence of the visual modality on prediction outcomes from the perspective of visual information interaction." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.206, + 0.484, + 0.281 + ], + "angle": 0, + "content": "We employ the saliency technique, a widely used interpretability tool, to highlight key token interactions within the attention mechanism. Following established practices, we utilize Taylor expansion to compute saliency scores for each element of the attention matrix:" + }, + { + "type": "equation", + "bbox": [ + 0.197, + 0.292, + 0.483, + 0.334 + ], + "angle": 0, + "content": "\\[\nI _ {l} = \\left| \\sum_ {h} A _ {h, l} \\odot \\frac {\\partial \\mathcal {L} (x)}{\\partial A _ {h , l}} \\right|. 
\\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.346, + 0.484, + 0.466 + ], + "angle": 0, + "content": "Here, \\( A_{h,l} \\) represents the attention matrix value for the \\( h \\)-th attention head in the \\( l \\)-th layer, \\( x \\) denotes the input, and \\( \\mathcal{L}(x) \\) is the loss function of the task, e.g., the cross-entropy objective for question-answering tasks. The saliency matrix \\( I_{l} \\) for the \\( l \\)-th layer is obtained by averaging across all attention heads. The significance of information flow from the \\( j \\)-th token to the \\( i \\)-th token in MLLMs is represented by \\( I_{l}(i,j) \\)." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.468, + 0.484, + 0.543 + ], + "angle": 0, + "content": "To draw a clearer picture of visual information flow in MLLMs, we introduce two quantitative metrics based on \\( I_{l}(i,j) \\), with a particular focus on the information interaction involving image tokens. The definitions of the two quantitative metrics follow below." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.543, + 0.484, + 0.574 + ], + "angle": 0, + "content": "\\(S_{vv}\\), measuring the importance of information flow among image tokens:" + }, + { + "type": "equation", + "bbox": [ + 0.18, + 0.586, + 0.482, + 0.622 + ], + "angle": 0, + "content": "\\[\nS _ {v v} = \\frac {\\sum_ {(i , j) \\in C _ {v v}} I _ {l} (i , j)}{\\left| C _ {v v} \\right|} \\tag {4}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.18, + 0.625, + 0.395, + 0.642 + ], + "angle": 0, + "content": "\\[\nC _ {v v} = \\{(i, j): i, j \\in \\mathcal {V}, i \\geq j \\}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.654, + 0.484, + 0.685 + ], + "angle": 0, + "content": "\\(S_{vt}\\), measuring the importance of information flow from image tokens to instruction tokens:" + }, + { + "type": "equation", + "bbox": [ + 0.185, + 0.697, + 0.483, + 0.733 + ], + "angle": 0, + "content": "\\[\nS _ {v t} = \\frac {\\sum_ {(i , j) \\in C _ {v t}} I _ {l} (i , j)}{\\left| C _ {v t} \\right|} \\tag {5}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.185, + 0.735, + 0.39, + 0.752 + ], + "angle": 0, + "content": "\\[\nC _ {v t} = \\{(i, j): i \\in \\mathcal {T}, j \\in \\mathcal {V} \\}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.765, + 0.484, + 0.902 + ], + "angle": 0, + "content": "Here, \\(\\mathcal{V}\\) represents the index set of image tokens, derived from features learned by pre-trained visual encoders, while \\(\\mathcal{T}\\) denotes the index set of instruction tokens, specifying requests or questions related to the images. \\(S_{vv}\\) and \\(S_{vt}\\) are utilized to analyze the mechanisms of visual information processing in MLLMs. We define attention interactions among image tokens as intra-visual information flow and those between image and instruction tokens as visual-textual information flow." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.092, + 0.907, + 0.228 + ], + "angle": 0, + "content": "We conducted experiments with the LLaVA-v1.5-7B model on the MS COCO dataset under the POPE benchmark, sampling 500 examples for evaluation. Fig. 3 underscores the critical role of the visual-textual information flow within the model's middle layers, specifically from the 8-th to the 15-th layer. This observation indicates that in these layers, visual information interacts intensively with textual information via attention mechanisms, which substantially influences the prediction outcomes." 
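To make the Sec. 4.1 measurements concrete, here is a minimal PyTorch sketch of Eqs. (3)-(5). It is an illustration under our own assumptions about tensor layout (per-layer attention of shape (num_heads, seq_len, seq_len), captured together with its gradient, e.g. via output_attentions=True plus retain_grad() before the backward pass), not the authors' released code.

import torch

def saliency_matrix(attn, grad):
    # attn, grad: (num_heads, seq_len, seq_len) -- one layer's attention
    # weights A_{h,l} and the gradient of the task loss w.r.t. them.
    # Eq. (3): I_l = | sum_h A_{h,l} (elementwise product) dL/dA_{h,l} |
    return (attn * grad).sum(dim=0).abs()

def flow_scores(I_l, vis_idx, ins_idx):
    # vis_idx: positions of image tokens (the set V); ins_idx: positions of
    # instruction tokens (the set T) in the input sequence.
    vis = torch.as_tensor(vis_idx)
    ins = torch.as_tensor(ins_idx)
    intra = I_l[vis][:, vis]
    lower = torch.tril(torch.ones_like(intra, dtype=torch.bool))  # pairs i >= j
    s_vv = intra[lower].mean()      # Eq. (4): intra-visual information flow
    s_vt = I_l[ins][:, vis].mean()  # Eq. (5): visual-textual information flow
    return s_vv.item(), s_vt.item()

Computing s_vv and s_vt for every layer and plotting them against the layer index yields curves of the kind compared in Fig. 3.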
+ }, + { + "type": "image", + "bbox": [ + 0.539, + 0.245, + 0.88, + 0.412 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.512, + 0.419, + 0.907, + 0.462 + ], + "angle": 0, + "content": "Figure 3. The importance of intra-visual flow and visual-textual flow across various layers. The visual-textual information flow in the middle layers has a significant impact on prediction outcomes." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.477, + 0.851, + 0.492 + ], + "angle": 0, + "content": "4.2. Attention Imbalance Across Modalities" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.499, + 0.906, + 0.575 + ], + "angle": 0, + "content": "Sec. 4.1 reveals that the middle layers facilitate crucial fusion, integrating visual and textual inputs into cross-modal semantic representations that drive final predictions. Accordingly, this section will delve deeper into the attention to visual inputs throughout the modality fusion process." + }, + { + "type": "image", + "bbox": [ + 0.526, + 0.592, + 0.882, + 0.774 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.513, + 0.782, + 0.907, + 0.838 + ], + "angle": 0, + "content": "Figure 4. Attention Distribution of Modal Information Across Model Layers. In the middle layers, the model allocates insufficient attention to visual features while disproportionately focusing on system prompts." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.856, + 0.906, + 0.902 + ], + "angle": 0, + "content": "We define the attention allocation, \\(\\lambda\\), as the aggregate attention score assigned to a specific type of token within a single layer. Accordingly, the attention allocation for sys" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.09, + 0.092, + 0.483, + 0.122 + ], + "angle": 0, + "content": "tem prompts, visual features, and user instructions in the \\(l\\)-th layer can be computed as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.208, + 0.135, + 0.369, + 0.167 + ], + "angle": 0, + "content": "\\[\n\\lambda_ {s y s} ^ {l} = \\sum_ {i \\in \\mathcal {T}} \\sum_ {j \\in \\mathcal {S}} A _ {l} (i, j),\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.208, + 0.171, + 0.483, + 0.203 + ], + "angle": 0, + "content": "\\[\n\\lambda_ {v i s} ^ {l} = \\sum_ {i \\in \\mathcal {T}} \\sum_ {j \\in \\mathcal {V}} A _ {l} (i, j), \\tag {6}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.208, + 0.207, + 0.369, + 0.24 + ], + "angle": 0, + "content": "\\[\n\\lambda_ {i n s} ^ {l} = \\sum_ {i \\in \\mathcal {T}} \\sum_ {j \\in \\mathcal {T}} A _ {l} (i, j).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.246, + 0.484, + 0.351 + ], + "angle": 0, + "content": "In this context, \\( A_{l} \\) represents the attention matrix averaged across all attention heads, while \\( S \\) represents the indices of system tokens. The measures \\( \\lambda_{sys}^{l}, \\lambda_{vis}^{l} \\), and \\( \\lambda_{ins}^{l} \\) provide insight into the distribution of attention to different modalities across various layers, aiding in understanding the reasons for the underutilization of visual information during the modality fusion process." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.352, + 0.484, + 0.489 + ], + "angle": 0, + "content": "The experimental setup aligns with that described in Sec. 4.1. Fig. 4 illustrates the allocation of attention to different modalities across the model's layers. 
In the middle layers, attention to visual features is markedly lower than that given to system prompts and user instructions. This suggests that during the critical process of modality fusion, the model's focus on visual input is insufficient. As a result, visual information is underutilized, leading to an output distribution skewed toward language priors." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.498, + 0.19, + 0.514 + ], + "angle": 0, + "content": "4.3. Insights" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.521, + 0.483, + 0.551 + ], + "angle": 0, + "content": "Based on the experimental results presented in Sec. 4.1 and Sec. 4.2, two significant conclusions can be drawn:" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.554, + 0.483, + 0.599 + ], + "angle": 0, + "content": "- The model performs the crucial fusion of visual and textual modalities in the middle layers, creating cross-modal semantic representations that drive the final predictions." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.6, + 0.483, + 0.63 + ], + "angle": 0, + "content": "- During this critical fusion process, the model demonstrates inadequate attention to the visual modality." + }, + { + "type": "list", + "bbox": [ + 0.091, + 0.554, + 0.483, + 0.63 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.632, + 0.484, + 0.692 + ], + "angle": 0, + "content": "These findings indicate that models fail to fully utilize visual information, resulting in an excessive dependence on language priors and, subsequently, the occurrence of hallucination phenomena." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.707, + 0.35, + 0.725 + ], + "angle": 0, + "content": "5. Visual Amplification Fusion" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.732, + 0.484, + 0.839 + ], + "angle": 0, + "content": "Building on the insights presented in Sec. 4, we introduce a hallucination mitigation method called Visual Amplification Fusion (VAF). As illustrated in Fig. 5, this approach heightens attention to visual information during modality fusion, effectively reducing the excessive dependency on language priors and ensuring that the generated content is closely grounded to visual inputs." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.849, + 0.317, + 0.863 + ], + "angle": 0, + "content": "5.1. Attention Redistribution" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.871, + 0.484, + 0.902 + ], + "angle": 0, + "content": "As outlined in Sec. 4, the model performs crucial fusion of visual and textual modalities within the middle layers." + } + ], + [ + { + "type": "image", + "bbox": [ + 0.533, + 0.089, + 0.885, + 0.24 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.513, + 0.244, + 0.907, + 0.302 + ], + "angle": 0, + "content": "Figure 5. Illustration of the Visual Amplification Fusion Method. In the middle layers, we select attention heads highly responsive to visual information, amplifying their focus on visual features while reducing unnecessary attention to system prompts." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.324, + 0.907, + 0.385 + ], + "angle": 0, + "content": "However, the attention allocated to visual modality information during this process remains insufficient. To address this, we adjust the attention weights in these layers to achieve a more balanced focus."
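Eq. (6) amounts to summing submatrices of the head-averaged attention map; a short sketch follows, with the token index sets assumed to be known from the prompt layout. The same quantity computed per head (lambda_vis for each head h) is what Sec. 5.2 later uses to rank attention heads and keep the top 50% as visual perception heads.

import torch

def attention_allocation(A_l, sys_idx, vis_idx, ins_idx):
    # A_l: (seq_len, seq_len) attention matrix of layer l averaged over all
    # heads; rows index query tokens, columns index key tokens.
    ins = torch.as_tensor(ins_idx)
    lam_sys = A_l[ins][:, torch.as_tensor(sys_idx)].sum()  # Eq. (6): lambda_sys
    lam_vis = A_l[ins][:, torch.as_tensor(vis_idx)].sum()  # Eq. (6): lambda_vis
    lam_ins = A_l[ins][:, ins].sum()                       # Eq. (6): lambda_ins
    return lam_sys.item(), lam_vis.item(), lam_ins.item()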
+ }, + { + "type": "image", + "bbox": [ + 0.534, + 0.393, + 0.887, + 0.556 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.513, + 0.567, + 0.907, + 0.636 + ], + "angle": 0, + "content": "Figure 6. Effect of Enhanced Visual Attention on Hallucination Suppression. Increasing attention to visual features in the fusion process of the model's middle layers successfully reduces hallucinations, enabling the model to correct its grape color prediction from \"green\" to \"red\"." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.654, + 0.906, + 0.699 + ], + "angle": 0, + "content": "Let \\(A_{l,h}\\) denote the attention matrix of the \\(h\\)-th attention head in the \\(l\\)-th layer, and \\(Z_{l,h}\\) represent its corresponding attention score matrix, defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.629, + 0.708, + 0.905, + 0.724 + ], + "angle": 0, + "content": "\\[\nA _ {l, h} = \\operatorname {s o f t m a x} \\left(Z _ {l, h}\\right). \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.732, + 0.907, + 0.838 + ], + "angle": 0, + "content": "Our objective during the modality fusion process is to amplify the model's attention to visual features while curbing an overemphasis on system prompts. This adjustment facilitates improved integration of visual information and reduces over-reliance on language priors. To achieve this, we modify the attention score matrix in the middle layers (i.e., \\(8 < l < 15\\)) as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.532, + 0.845, + 0.905, + 0.865 + ], + "angle": 0, + "content": "\\[\n\\hat {Z} _ {l, h} = Z _ {l, h} + \\alpha \\cdot M _ {l, h} ^ {\\text {e n h}} \\circ Z _ {l, h} - \\beta \\cdot M _ {l, h} ^ {\\text {s u p}} \\circ Z _ {l, h}. \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.871, + 0.907, + 0.902 + ], + "angle": 0, + "content": "Here, \\(\\alpha\\) is the enhancement coefficient (\\(\\alpha > 0\\)), where larger values indicate stronger amplification of visual attention." + } + ], + [ + { + "type": "text", + "bbox": [ + 0.09, + 0.092, + 0.482, + 0.166 + ], + "angle": 0, + "content": "The suppression coefficient \\(\\beta\\) (\\(0 < \\beta < 1\\)) determines the extent of attention suppression directed at system prompts. The enhancement and suppression mask matrices, \\(M_{l,h}^{enh}\\) and \\(M_{l,h}^{sup}\\) respectively, are defined to guide the modulation of attention elements:" + }, + { + "type": "equation", + "bbox": [ + 0.182, + 0.173, + 0.483, + 0.213 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} M _ {l, h} ^ {\\text {e n h}} (i, j) = \\mathbb {I} (i \\in \\mathcal {T}, j \\in \\mathcal {V}), \\\\ M _ {l, h} ^ {\\text {s u p}} (i, j) = \\mathbb {I} (i \\in \\mathcal {T}, j \\in \\mathcal {S}). \\end{array} \\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.222, + 0.484, + 0.313 + ], + "angle": 0, + "content": "These modifications optimize attention allocation by enhancing the model's focus on visual features during modality fusion and minimizing superfluous attention to system prompts. As illustrated in Fig. 6, preliminary analysis indicates that this approach effectively mitigates hallucination issues by promoting greater attention to visual information."
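For reference, an illustrative sketch of the adjustment in Eqs. (8)-(9) follows. The default coefficients match the settings reported in Sec. 6.1.3 (alpha = 0.15, beta = 0.1); the tensor layout and the omission of the per-head restriction of Sec. 5.2 are simplifications of ours, not the paper's implementation.

import torch

def vaf_adjust(Z, sys_idx, vis_idx, ins_idx, alpha=0.15, beta=0.1):
    # Z: (num_heads, seq_len, seq_len) pre-softmax attention scores of one
    # middle layer; the paper modifies only layers with 8 < l < 15, and in
    # the full method only the visual perception heads are adjusted.
    seq_len = Z.shape[-1]
    enh = torch.zeros(seq_len, seq_len, dtype=Z.dtype)
    sup = torch.zeros(seq_len, seq_len, dtype=Z.dtype)
    ins = torch.as_tensor(ins_idx).unsqueeze(1)
    enh[ins, torch.as_tensor(vis_idx)] = 1.0  # M_enh(i,j) = 1 iff i in T, j in V
    sup[ins, torch.as_tensor(sys_idx)] = 1.0  # M_sup(i,j) = 1 iff i in T, j in S
    # Eq. (8): Z_hat = Z + alpha * (M_enh o Z) - beta * (M_sup o Z);
    # softmax(Z_hat) then replaces the original weights as in Eq. (7).
    return Z + alpha * enh * Z - beta * sup * Z

Because the change is a masked rescaling inside the attention module, the rest of the forward pass is untouched, which is consistent with the near-zero inference-speed overhead reported for VAF.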
+ }, + { + "type": "title", + "bbox": [ + 0.091, + 0.32, + 0.353, + 0.336 + ], + "angle": 0, + "content": "5.2. Visual Perception Restriction" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.342, + 0.484, + 0.478 + ], + "angle": 0, + "content": "Enhancing visual attention across all attention heads in the middle layers can be overly aggressive and may negatively impact content generation. To address this, we propose a selective enhancement strategy. Specifically, we identify and isolate the attention heads that exhibit higher sensitivity to visual information, which we term visual perception heads. We then restrict the visual attention enhancement to these visual perception heads, ensuring better utilization of visual information while maintaining overall model performance." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.479, + 0.484, + 0.584 + ], + "angle": 0, + "content": "In the model, attention heads that allocate more attention to visual features demonstrate heightened sensitivity to visual information. Let \\( A_{l,h} \\) represent the attention matrix of the \\( h \\)-th attention head in the \\( l \\)-th layer of the model, with its corresponding visual attention allocation denoted by \\( \\lambda_{\\mathrm{vis}}^{l,h} \\). In each attention layer, we identify the attention heads whose visual attention allocation fall within the top" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.092, + 0.906, + 0.138 + ], + "angle": 0, + "content": "\\(50\\%\\) and designate them as visual perception heads, subsequently redistributing their attention. The attention matrices of the remaining attention heads are kept unchanged." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.151, + 0.64, + 0.168 + ], + "angle": 0, + "content": "6. Experiment" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.177, + 0.907, + 0.299 + ], + "angle": 0, + "content": "This section demonstrates the effectiveness of the proposed VAF method in mitigating hallucinations. Sec. 6.1 outlines the experimental setup, detailing the evaluation benchmarks and VAF parameter configurations. Sec. 6.2 then presents the experimental results from three perspectives: reduction of hallucinations, coherence of generated content, and inference speed. Finally, Sec. 6.3 further verifies the contribution of each VAF component through ablation studies." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.308, + 0.722, + 0.324 + ], + "angle": 0, + "content": "6.1. Experimental Settings" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.33, + 0.906, + 0.376 + ], + "angle": 0, + "content": "In Sec. 6.1.1, we present the selected datasets and evaluation metrics. Sec. 6.1.2 details the chosen MLLM backbone models, and Sec. 6.1.3 outlines the baseline settings." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.384, + 0.774, + 0.398 + ], + "angle": 0, + "content": "6.1.1. Datasets & Evaluation Metrics" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.403, + 0.906, + 0.552 + ], + "angle": 0, + "content": "Polling-based Object Probing Evaluation (POPE). POPE [30] is a novel framework designed to evaluate object hallucinations in MLLMs. Departing from traditional caption-based approaches, POPE frames hallucination detection as a binary task by posing straightforward yes-or-no questions regarding the presence of specific objects in an image (e.g., \"Is there a chair in the image?\"). 
Performance on POPE is measured across four metrics: Accuracy, Precision, Recall, and F1 score, allowing for a thorough evaluation of hallucinations in MLLMs." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.553, + 0.906, + 0.585 + ], + "angle": 0, + "content": "Multimodal Model Evaluation (MME). MME [13] benchmark provides a comprehensive framework for evalu" + }, + { + "type": "table", + "bbox": [ + 0.165, + 0.605, + 0.843, + 0.858 + ], + "angle": 0, + "content": "
Category | Method | LLaVA-v1.5-7B | | LLaVA-v1.5-13B | | Qwen-VL-Chat-7B |
 | | Accuracy | F1-score | Accuracy | F1-score | Accuracy | F1-score
Random | Regular | 87.8 ↑0.0 | 87.5 ↑0.0 | 87.6 ↑0.0 | 87.4 ↑0.0 | 88.2 ↑0.0 | 87.9 ↑0.0
Random | VCD | 88.4 ↑0.6 | 87.7 ↑0.2 | 88.9 ↑1.3 | 87.8 ↑0.4 | 89.1 ↑0.9 | 88.4 ↑0.5
Random | ICD | 88.1 ↑0.3 | 87.6 ↑0.1 | 88.1 ↑0.5 | 87.6 ↑0.2 | 88.9 ↑0.7 | 88.1 ↑0.2
Random | VAF | 89.6 ↑1.8 | 89.3 ↑1.8 | 90.1 ↑2.5 | 89.9 ↑2.5 | 90.0 ↑1.8 | 89.7 ↑1.8
Popular | Regular | 82.5 ↑0.0 | 83.2 ↑0.0 | 82.7 ↑0.0 | 84.1 ↑0.0 | 82.4 ↑0.0 | 83.1 ↑0.0
Popular | VCD | 83.1 ↑0.6 | 84.1 ↑0.9 | 83.7 ↑1.0 | 85.1 ↑1.0 | 83.0 ↑0.6 | 84.1 ↑1.0
Popular | ICD | 82.1 ↓0.4 | 82.9 ↓0.3 | 82.9 ↑0.2 | 84.3 ↑0.2 | 83.2 ↑0.8 | 84.5 ↑1.4
Popular | VAF | 84.5 ↑2.0 | 84.9 ↑1.7 | 85.2 ↑2.5 | 86.4 ↑2.3 | 84.9 ↑2.5 | 85.1 ↑2.0
Adversarial | Regular | 77.6 ↑0.0 | 79.4 ↑0.0 | 77.8 ↑0.0 | 79.5 ↑0.0 | 77.2 ↑0.0 | 78.9 ↑0.0
Adversarial | VCD | 78.1 ↑0.5 | 79.6 ↑0.2 | 78.2 ↑0.4 | 79.7 ↑0.2 | 78.8 ↑1.6 | 80.1 ↑1.2
Adversarial | ICD | 78.5 ↑0.9 | 79.9 ↑0.5 | 79.1 ↑1.3 | 80.1 ↑0.6 | 78.1 ↑0.9 | 79.2 ↑0.3
Adversarial | VAF | 80.1 ↑2.5 | 81.0 ↑1.6 | 80.7 ↑2.9 | 81.7 ↑2.2 | 80.4 ↑3.2 | 81.2 ↑2.3
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.868, + 0.906, + 0.898 + ], + "angle": 0, + "content": "Table 2. Performance on POPE. Results are averaged across the MS-COCO, A-OKVQA, and GQA datasets. The VAF method demonstrates superior hallucination suppression across all three MLLMs. The best performance for each setting is highlighted in red." + } + ], + [ + { + "type": "table", + "bbox": [ + 0.114, + 0.089, + 0.891, + 0.344 + ], + "angle": 0, + "content": "
Model | Method | Object-level | | Attribute-level | | Total Score
 | | Existence | Count | Position | Color |
LLaVA-v1.5-7B | Regular | 185.00 ↑0.00 | 146.67 ↑0.00 | 128.33 ↑0.00 | 150.00 ↑0.00 | 610.00 ↑0.00
LLaVA-v1.5-7B | VCD | 185.00 ↑0.00 | 141.33 ↓5.34 | 128.33 ↑0.00 | 153.00 ↑3.00 | 607.66 ↓2.34
LLaVA-v1.5-7B | ICD | 185.00 ↑0.00 | 148.33 ↑1.66 | 126.66 ↓1.67 | 148.33 ↓1.67 | 608.32 ↓1.68
LLaVA-v1.5-7B | VAF | 195.00 ↑10.00 | 158.33 ↑11.66 | 128.33 ↑0.00 | 155.00 ↑5.00 | 636.67 ↑26.67
LLaVA-v1.5-13B | Regular | 185.00 ↑0.00 | 155.00 ↑0.00 | 133.33 ↑0.00 | 165.00 ↑0.00 | 638.33 ↑0.00
LLaVA-v1.5-13B | VCD | 185.00 ↑0.00 | 155.00 ↑0.00 | 130.00 ↓3.33 | 168.33 ↑3.33 | 638.33 ↑0.00
LLaVA-v1.5-13B | ICD | 183.33 ↓1.67 | 153.33 ↓1.67 | 131.67 ↓1.66 | 165.00 ↑0.00 | 633.33 ↓5.00
LLaVA-v1.5-13B | VAF | 195.00 ↑10.00 | 160.00 ↑5.00 | 136.67 ↑3.34 | 170.00 ↑5.00 | 661.67 ↑23.34
Qwen-VL-7B | Regular | 158.33 ↑0.00 | 150.00 ↑0.00 | 128.33 ↑0.00 | 170.00 ↑0.00 | 606.66 ↑0.00
Qwen-VL-7B | VCD | 158.33 ↑0.00 | 150.00 ↑0.00 | 133.33 ↑5.00 | 175.00 ↑5.00 | 616.66 ↑10.00
Qwen-VL-7B | ICD | 128.33 ↓30.00 | 151.67 ↑1.67 | 128.33 ↑0.00 | 170.00 ↑0.00 | 578.33 ↓28.33
Qwen-VL-7B | VAF | 165.00 ↑6.67 | 155.00 ↑5.00 | 133.33 ↑5.00 | 175.00 ↑5.00 | 628.33 ↑21.67
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.353, + 0.908, + 0.385 + ], + "angle": 0, + "content": "Table 3. Results on the MME subset. Across three MLLMs, the VAF method achieved the most effective suppression of both object-level and attribute-level hallucinations. The highest scores in each setting are highlighted in red." + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.409, + 0.482, + 0.528 + ], + "angle": 0, + "content": "ating MLLMs across both perceptual and cognitive dimensions. It consists of ten perception-oriented tasks and four cognition-oriented tasks, with model performance assessed through accuracy metrics. In addition to the full dataset, we leverage specific subsets, such as object existence and counting to analyze object-level hallucinations, while position and color subsets are employed to examine attribute-level hallucinations." + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.529, + 0.483, + 0.636 + ], + "angle": 0, + "content": "Novel Object Captioning at Scale (Nocaps). NoCaps [3] benchmark is designed to evaluate image captioning models on their ability to describe novel objects absent from standard datasets like COCO. Model performance is quantified using the CIDEr score, providing a basis to assess the coherence and accuracy of generated captions in response to images containing unfamiliar objects." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.644, + 0.27, + 0.658 + ], + "angle": 0, + "content": "6.1.2. MLLM Backbones" + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.662, + 0.483, + 0.829 + ], + "angle": 0, + "content": "In comparison to the Q-former structure, linear projection demonstrates greater efficiency in aligning visual and textual features. This advantage is evident in MLLMs with linear projection architectures, such as LLaVA and Qwen-VL, which outperform Q-former-based MLLMs like Instruct-BLIP and MiniGPT4. Based on these findings, we selected three linear-projection-based MLLMs, specifically LLaVA-v1.5-7B, LLaVA-v1.5-13B [35], and Qwen-VL-7B [5], to evaluate the effectiveness of our proposed VAF method. Detailed prompt templates for each model across various benchmarks are included in Sec. 10." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.837, + 0.26, + 0.853 + ], + "angle": 0, + "content": "6.1.3. Baseline Settings." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.856, + 0.485, + 0.903 + ], + "angle": 0, + "content": "We primarily compared our approach to the VCD [23] and ICD [47] methods. VCD mitigates hallucinations by contrasting output distributions derived from original and dis" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.409, + 0.907, + 0.501 + ], + "angle": 0, + "content": "torted visual inputs, while ICD reduces hallucinated concepts by comparing distributions generated with standard versus disrupted instructions. To ensure consistency and reproducibility in our comparisons, all methods use greedy search. Unless specified otherwise, our experiments set \\(\\beta = 0.1\\) and \\(\\alpha = 0.15\\)." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.509, + 0.71, + 0.525 + ], + "angle": 0, + "content": "6.2. Results and Analysis" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.531, + 0.907, + 0.608 + ], + "angle": 0, + "content": "Sec. 6.2.1 examines the effectiveness of various methods in mitigating hallucinations, while Sec. 6.2.2 assesses their impact on the quality of generated content. Sec. 6.2.3 then analyzes the influence of each method on inference speed. 
Additional experimental results are provided in Sec. 8." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.614, + 0.733, + 0.629 + ], + "angle": 0, + "content": "6.2.1. Hallucination Mitigation" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.633, + 0.906, + 0.771 + ], + "angle": 0, + "content": "Tab. 2 presents the experimental results of the VAF method on the POPE benchmark, with results averaged across the MSCOCO [31], A-OKVQA [42], and GQA [20] datasets. Applied to both the LLaVA-v1.5 model family and the Qwen-VL model, the VAF method consistently surpasses the VCD and ICD methods in reducing hallucinations. Tab. 3 further highlights the performance of VAF on the MME benchmark, demonstrating its effectiveness in suppressing both object-level and attribute-level hallucinations." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.777, + 0.791, + 0.79 + ], + "angle": 0, + "content": "6.2.2. Coherence of Generated Content" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.795, + 0.907, + 0.903 + ], + "angle": 0, + "content": "Tab. 4 presents the experimental results for various methods on the Nocaps and ScienceQA datasets. It is evident that VCD and ICD substantially degrade the quality of the generated content. Specifically, on the Nocaps dataset, VCD and ICD reduce CIDEr scores by \\(18\\%\\) and \\(27\\%\\), respectively. This degradation primarily arises from the crude disruption of language priors by contrastive decoding methods," + } + ], + [ + { + "type": "table", + "bbox": [ + 0.102, + 0.09, + 0.48, + 0.276 + ], + "angle": 0, + "content": "
Model | Decoding | ScienceQA (Accuracy) | Nocaps (CIDEr)
LLaVA-v1.5-7B | Regular | 68.0 | 78.7
LLaVA-v1.5-7B | VCD | 64.5 | 65.7
LLaVA-v1.5-7B | ICD | 62.4 | 62.3
LLaVA-v1.5-7B | VAF | 68.5 | 78.8
LLaVA-v1.5-13B | Regular | 71.6 | 82.6
LLaVA-v1.5-13B | VCD | 70.0 | 68.9
LLaVA-v1.5-13B | ICD | 69.2 | 60.3
LLaVA-v1.5-13B | VAF | 71.7 | 82.3
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.287, + 0.483, + 0.318 + ], + "angle": 0, + "content": "Table 4. Results on SQA and Nocaps datasets. The highest and second-highest scores are marked in red and blue, respectively." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.342, + 0.483, + 0.404 + ], + "angle": 0, + "content": "which leads to generated content lacking coherence and accuracy. By contrast, our method demonstrates minimal negative impact on prediction results, maintaining both coherence and accuracy effectively." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.41, + 0.25, + 0.426 + ], + "angle": 0, + "content": "6.2.3. Inference Speed" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.429, + 0.484, + 0.52 + ], + "angle": 0, + "content": "Fig. 7 illustrates the impact of different strategies on model inference speed within the Nocaps dataset. In comparison, the VCD and ICD methods nearly double the inference time due to the need to process contrastive input samples, whereas the VAF method has minimal impact on the inference speed of multimodal large language models." + }, + { + "type": "image", + "bbox": [ + 0.114, + 0.539, + 0.443, + 0.715 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.724, + 0.483, + 0.768 + ], + "angle": 0, + "content": "Figure 7. Comparison of different strategies on inference speed. The VCD and ICD methods reduce inference speed by \\(50\\%\\), whereas the VAF method shows minimal impact." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.789, + 0.245, + 0.805 + ], + "angle": 0, + "content": "6.3. Ablation Study" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.811, + 0.484, + 0.902 + ], + "angle": 0, + "content": "Ablation studies on the enhancement coefficient \\(\\alpha\\) were conducted using the COCO-Random dataset within the POPE benchmark to understand its influence on model performance. Fig. 8 demonstrates that when \\(0 < \\alpha < 0.25\\), model hallucinations are effectively suppressed. However, when \\(\\alpha\\) surpasses 0.25, performance starts to degrade. We" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.092, + 0.906, + 0.152 + ], + "angle": 0, + "content": "propose that this reduction in performance may stem from an excessive focus on visual features, disrupting the balanced integration of language information and diminishing overall model effectiveness." + }, + { + "type": "image", + "bbox": [ + 0.539, + 0.164, + 0.88, + 0.343 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.535, + 0.353, + 0.884, + 0.368 + ], + "angle": 0, + "content": "Figure 8. Ablation study of \\(\\alpha\\) on the POPE benchmark." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.372, + 0.907, + 0.493 + ], + "angle": 0, + "content": "We performed ablation studies on the visual perception restriction mechanism, evaluating its impact on the POPE and Nocaps benchmarks. Tab. 5 highlights the effects of restricting attention reallocation to visual perception heads. Increasing attention to visual features alone reduces model hallucinations, while confining this reallocation strategy to visual perception heads minimizes adverse effects on content quality. More ablation studies can be found in Sec. 9." + }, + { + "type": "table", + "bbox": [ + 0.528, + 0.504, + 0.895, + 0.609 + ], + "angle": 0, + "content": "
Model | Visual Restriction | POPE | Nocaps
LLaVA-7B | ✓ | 89.8 | 78.8
LLaVA-7B | ✗ | 89.9 | 76.4
LLaVA-13B | ✓ | 90.2 | 82.3
LLaVA-13B | ✗ | 90.0 | 81.1
" + }, + { + "type": "table_caption", + "bbox": [ + 0.513, + 0.62, + 0.906, + 0.663 + ], + "angle": 0, + "content": "Table 5. Ablation Study of Visual Perception Restriction Mechanism. Restricting attention redistribution to the visual perception heads more effectively preserves the quality of generated content." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.68, + 0.628, + 0.695 + ], + "angle": 0, + "content": "7. conclusion" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.705, + 0.907, + 0.901 + ], + "angle": 0, + "content": "In this paper, we identify two key drawbacks of using contrastive decoding to mitigate hallucinations in MLLMs: reduced quality of generated content and slower inference speed. To address these challenges, we propose a novel approach, Visual Amplification Fusion, which effectively mitigates hallucinations while preserving both inference speed and content generation quality. By enhancing the attention to visual features during modality fusion, VAF minimizes the over-reliance on language priors, ensuring a high degree of consistency between generated content and visual inputs. Extensive experiments across multiple benchmarks and MLLMs demonstrate that VAF provides a clear advantage in hallucination mitigation." + } + ], + [ + { + "type": "title", + "bbox": [ + 0.092, + 0.091, + 0.259, + 0.108 + ], + "angle": 0, + "content": "Acknowledgements" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.115, + 0.484, + 0.206 + ], + "angle": 0, + "content": "This work is supported by the National Natural Science Foundation of China under Grant 62176246. This work is also supported by Anhui Province Key Research and Development Plan (202304a05020045), Anhui Province Natural Science Foundation (2208085UD17) and National Natural Science Foundation of China under Grant 62406098." + }, + { + "type": "title", + "bbox": [ + 0.093, + 0.218, + 0.188, + 0.234 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.101, + 0.244, + 0.484, + 0.312 + ], + "angle": 0, + "content": "[1] Vedika Agarwal, Rakshith Shetty, and Mario Fritz. Towards causal vqa: Revealing and reducing spurious correlations by invariant and covariant semantic editing. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9690-9698, 2020. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.314, + 0.484, + 0.354 + ], + "angle": 0, + "content": "[2] Aishwarya Agrawal, Dhruv Batra, and Devi Parikh. Analyzing the behavior of visual question answering models. arXiv preprint arXiv:1606.07356, 2016. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.357, + 0.483, + 0.424 + ], + "angle": 0, + "content": "[3] Harsh Agrawal, Karan Desai, Yufei Wang, Xinlei Chen, Rishabh Jain, Mark Johnson, Dhruv Batra, Devi Parikh, Stefan Lee, and Peter Anderson. nocaps: novel object captioning at scale. In 2019 IEEE/CVF International Conference on Computer Vision (ICCV). IEEE, 2019. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.426, + 0.483, + 0.452 + ], + "angle": 0, + "content": "[4] Jinze Bai, Shuai Bai, and et al. Qwen technical report. arXiv preprint arXiv:2309.16609, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.454, + 0.483, + 0.494 + ], + "angle": 0, + "content": "[5] Jinze Bai, Shuai Bai, and et al. Qwen-vl: A frontier large vision-language model with versatile abilities. arXiv preprint arXiv:2308.12966, 2023. 
2, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.497, + 0.482, + 0.522 + ], + "angle": 0, + "content": "[6] Rohan Bavishi, Erich Elsen, and et al. Introducing our multimodal models, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.525, + 0.483, + 0.593 + ], + "angle": 0, + "content": "[7] Ali Furkan Biten, Lluís Gómez, and Dimosthenis Karatzas. Let there be a clock on the beach: Reducing object hallucination in image captioning. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 1381-1390, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.595, + 0.483, + 0.634 + ], + "angle": 0, + "content": "[8] Keqin Chen, Zhao Zhang, and et al. Shikra: Unleashing multimodal llm's referential dialogue magic. arXiv preprint arXiv:2306.15195, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.637, + 0.483, + 0.704 + ], + "angle": 0, + "content": "[9] Long Chen, Oleg Sinavski, Jan Hunermann, Alice Karnsund, Andrew James Willmott, Danny Birch, Daniel Maund, and Jamie Shotton. Driving with llms: Fusing object-level vector modality for explainable autonomous driving. arXiv preprint arXiv:2310.01957, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.706, + 0.483, + 0.759 + ], + "angle": 0, + "content": "[10] Zhe Chen, Weiyun Wang, and et al. How far are we to gpt-4v? closing the gap to commercial multimodal models with open-source suites. arXiv preprint arXiv:2404.16821, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.762, + 0.483, + 0.815 + ], + "angle": 0, + "content": "[11] Wei-Lin Chiang and Zhuohan et al Li. Vicuna: An opensource chatbot impressing gpt-4 with \\(90\\%\\) chatgpt quality. See https://vicuna.lmsys.org (accessed 14 April 2023), 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.818, + 0.483, + 0.858 + ], + "angle": 0, + "content": "[12] Wenliang Dai and Junnan Li et al. Instructlip: Towards general-purpose vision-language models with instruction tuning, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.86, + 0.483, + 0.901 + ], + "angle": 0, + "content": "[13] Chaoyou Fu, Peixian Chen, and et al. Mme: A comprehensive evaluation benchmark for multimodal large language models. arXiv preprint arXiv:2306.13394, 2023. 6" + }, + { + "type": "list", + "bbox": [ + 0.094, + 0.244, + 0.484, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.093, + 0.906, + 0.162 + ], + "angle": 0, + "content": "[14] Yash Goyal, Tejas Khot, Douglas Summers-Stay, Dhruv Bartra, and Devi Parikh. Making the v in vqa matter: Elevating the role of image understanding in visual question answering. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 6904-6913, 2017. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.164, + 0.905, + 0.205 + ], + "angle": 0, + "content": "[15] Anisha Gunjal, Jihan Yin, and Erhan Bas. Detecting and preventing hallucinations in large vision language models. arXiv preprint arXiv:2308.06394, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.207, + 0.905, + 0.288 + ], + "angle": 0, + "content": "[16] Vipul Gupta, Zhuowan Li, Adam Kortylewski, Chenyu Zhang, Yingwei Li, and Alan Yuille. Swapmix: Diagnosing and regularizing the over-reliance on visual context in visual question answering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5078-5088, 2022. 
1" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.291, + 0.905, + 0.345 + ], + "angle": 0, + "content": "[17] Yudong Han, Liqiang Nie, Jianhua Yin, Jianlong Wu, and Yan Yan. Visual perturbation-aware collaborative learning for overcoming the language prior problem. arXiv preprint arXiv:2207.11850, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.348, + 0.905, + 0.401 + ], + "angle": 0, + "content": "[18] Mingzhe Hu, Shaoyan Pan, Yuheng Li, and Xiaofeng Yang. Advancing medical imaging with language models: A journey from n-grams to chatgpt. arXiv preprint arXiv:2304.04920, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.404, + 0.905, + 0.486 + ], + "angle": 0, + "content": "[19] Qidong Huang, Xiaoyi Dong, Pan Zhang, Bin Wang, Conghui He, Jiaqi Wang, Dahua Lin, Weiming Zhang, and Nenghai Yu. Opera: Alleviating hallucination in multimodal large language models via over-trust penalty and retrospection-allocation. In CVPR, pages 13418-13427, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.489, + 0.904, + 0.53 + ], + "angle": 0, + "content": "[20] Drew A Hudson and Christopher D Manning. Gqa: A new dataset for real-world visual reasoning and compositional question answering. In CVPR, pages 6700-6709, 2019. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.532, + 0.905, + 0.585 + ], + "angle": 0, + "content": "[21] Fushuo Huo, Wenchao Xu, Zhong Zhang, Haozhao Wang, Zhicheng Chen, and Peilin Zhao. Self-introspective decoding: Alleviating hallucinations for large vision-language models, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.588, + 0.905, + 0.629 + ], + "angle": 0, + "content": "[22] Chaoya Jiang, Haiyang Xu, and et al. Hallucination augmented contrastive learning for multimodal large language model. In CVPR, pages 27036-27046, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.631, + 0.905, + 0.685 + ], + "angle": 0, + "content": "[23] Sicong Leng, Hang Zhang, and et al. Mitigating object hallucinations in large vision-language models through visual contrastive decoding. In CVPR, pages 13872-13882, 2024. 2, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.688, + 0.905, + 0.728 + ], + "angle": 0, + "content": "[24] Bo Li, Yuanhan Zhang, and et al. Mimic-it: Multi-modal in-context instruction tuning. arXiv preprint arXiv:2306.05425, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.731, + 0.905, + 0.758 + ], + "angle": 0, + "content": "[25] Bo Li, Kaichen Zhang, and et al. Llava next: Stronger llms supercharge multimodal capabilities in the wild, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.76, + 0.905, + 0.801 + ], + "angle": 0, + "content": "[26] Chunyuan Li, Cliff Wong, and et al. Llava-med: Training a large language-and-vision assistant for biomedicine in one day. In NeurIPS, pages 28541-28564, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.803, + 0.905, + 0.857 + ], + "angle": 0, + "content": "[27] Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. BLIP-2: Bootstrapping language-image pre-training with frozen image encoders and large language models. In ICML, 2023. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.859, + 0.905, + 0.901 + ], + "angle": 0, + "content": "[28] Xiang Lisa Li, Ari Holtzman, and et al. Contrastive decoding: Open-ended text generation as optimization. arXiv preprint arXiv:2210.15097, 2022. 
2" + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.093, + 0.906, + 0.901 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.092, + 0.482, + 0.147 + ], + "angle": 0, + "content": "[29] Yifan Li, Yifan Du, Kun Zhou, Jinpeng Wang, Wayne Xin Zhao, and Ji-Rong Wen. Evaluating object hallucination in large vision-language models. arXiv preprint arXiv:2305.10355, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.15, + 0.482, + 0.207 + ], + "angle": 0, + "content": "[30] Yifan Li, Yifan Du, Kun Zhou, Jinpeng Wang, Xin Zhao, and Ji-Rong Wen. Evaluating object hallucination in large vision-language models. In EMNLP, pages 292-305, 2023. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.209, + 0.482, + 0.248 + ], + "angle": 0, + "content": "[31] Tsung-Yi Lin, Michael Maire, and et al. Microsoft coco: Common objects in context. In ECCV, pages 740-755, 2014. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.252, + 0.482, + 0.308 + ], + "angle": 0, + "content": "[32] Fuxiao Liu, Kevin Lin, Linjie Li, Jianfeng Wang, Yaser Ya-coob, and Lijuan Wang. Mitigating hallucination in large multi-modal models via robust instruction tuning. arXiv preprint arXiv:2306.14565, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.31, + 0.482, + 0.351 + ], + "angle": 0, + "content": "[33] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. In NeurIPS, pages 34892-34916, 2023. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.354, + 0.482, + 0.409 + ], + "angle": 0, + "content": "[34] Haokun Liu, Yaonan Zhu, Kenji Kato, Izumi Kondo, Tadayoshi Aoyama, and Yasuhisa Hasegawa. Lm-based human-robot collaboration framework for manipulation tasks. arXiv preprint arXiv:2308.14972, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.412, + 0.482, + 0.453 + ], + "angle": 0, + "content": "[35] Haotian Liu, Chunyuan Li, Yuheng Li, and Yong Jae Lee. Improved baselines with visual instruction tuning. In CVPR, pages 26296-26306, 2024. 1, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.456, + 0.482, + 0.511 + ], + "angle": 0, + "content": "[36] Zhi-Song Liu, Robin Courant, and Vicky Kalogeiton. Funnynet-w: Multimodal learning of funny moments in videos in the wild. International Journal of Computer Vision, pages 1-22, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.513, + 0.482, + 0.569 + ], + "angle": 0, + "content": "[37] Holy Lvenia, Wenliang Dai, Samuel Cahyawijaya, Ziwei Ji, and Pascale Fung. Negative object presence evaluation (nope) to measure object hallucination in vision-language models. arXiv preprint arXiv:2310.05338, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.571, + 0.482, + 0.653 + ], + "angle": 0, + "content": "[38] Pan Lu, Swaroop Mishra, Tony Xia, Liang Qiu, Kai-Wei Chang, Song-Chun Zhu, Oyvind Tafjord, Peter Clark, and Ashwin Kalyan. Learn to explain: Multimodal reasoning via thought chains for science question answering. In The 36th Conference on Neural Information Processing Systems (NeurIPS), 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.656, + 0.482, + 0.711 + ], + "angle": 0, + "content": "[39] Jinjie Mai, Jun Chen, Bing Li, Guocheng Qian, Mohamed Elhoseiny, and Bernard Ghanem. Llm as a robotic brain: Unifying egocentric memory and control. arXiv preprint arXiv:2304.09349, 2023. 
1" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.714, + 0.482, + 0.741 + ], + "angle": 0, + "content": "[40] AI Meta. Introducing meta llama 3: The most capable openly available llm to date. Meta AI, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.744, + 0.482, + 0.813 + ], + "angle": 0, + "content": "[41] Yulei Niu, Kaihua Tang, Hanwang Zhang, Zhiwu Lu, XianSheng Hua, and Ji-Rong Wen. Counterfactual vqa: A cause-effect look at language bias. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12700-12710, 2021. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.816, + 0.482, + 0.857 + ], + "angle": 0, + "content": "[42] Dustin Schwenk, Apoorv Khandelwal, and et al. A-okvqa: A benchmark for visual question answering using world knowledge. In ECCV, pages 146–162, 2022. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.859, + 0.482, + 0.902 + ], + "angle": 0, + "content": "[43] Rohan Taori, Ishaan Gulrajani, and et al. Stanford alpaca: an instruction-following llama model (2023). URL https://github.com/tatsu-lab/stanford_alpaca, 1(9), 2023. 2" + }, + { + "type": "list", + "bbox": [ + 0.093, + 0.092, + 0.482, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.092, + 0.906, + 0.134 + ], + "angle": 0, + "content": "[44] Hugo Touvron, Thibaut Lavril, and et al. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.136, + 0.906, + 0.176 + ], + "angle": 0, + "content": "[45] Hugo Touvron, Louis Martin, and et al. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.179, + 0.906, + 0.234 + ], + "angle": 0, + "content": "[46] Sheng Wang, Zihao Zhao, Xi Ouyang, Qian Wang, and Dinggang Shen. Chatcad: Interactive computer-aided diagnosis on medical image using large language models. arXiv preprint arXiv:2302.07257, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.235, + 0.906, + 0.288 + ], + "angle": 0, + "content": "[47] Xintong Wang, Jingheng Pan, and et al. Mitigating hallucinations in large vision-language models with instruction contrastive decoding. arXiv preprint arXiv:2403.18715, 2024.2, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.291, + 0.906, + 0.347 + ], + "angle": 0, + "content": "[48] Yike Wu, Yu Zhao, Shiwan Zhao, Ying Zhang, Xiaojie Yuan, Guoqing Zhao, and Ning Jiang. Overcoming language priors in visual question answering via distinguishing superficially similar instances. arXiv preprint arXiv:2209.08529, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.349, + 0.906, + 0.389 + ], + "angle": 0, + "content": "[49] Zhenyu Wu, Ziwei Wang, Xiuwei Xu, Jiwen Lu, and Haibin Yan. Embodied task planning with large language models. arXiv preprint arXiv:2307.01848, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.391, + 0.906, + 0.446 + ], + "angle": 0, + "content": "[50] Hong Yan, Lijun Liu, Xupeng Feng, and Qingsong Huang. Overcoming language priors with self-contrastive learning for visual question answering. *Multimedia Tools and Applications*, 82(11):16343–16358, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.448, + 0.906, + 0.489 + ], + "angle": 0, + "content": "[51] Qinghao Ye, Haiyang Xu, and et al. mplug-owl: Modularization empowers large language models with multimodality. 
arXiv preprint arXiv:2304.14178, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.491, + 0.906, + 0.532 + ], + "angle": 0, + "content": "[52] Shilong Zhang, Peize Sun, and et al. Gpt4roi: Instruction tuning large language model on region-of-interest. arXiv preprint arXiv:2307.03601, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.534, + 0.906, + 0.603 + ], + "angle": 0, + "content": "[53] Ren Zhibo, Wang Huizhen, Zhu Muhua, Wang Yichao, Xiao Tong, and Zhu Jingbo. Overcoming language priors with counterfactual inference for visual question answering. In Proceedings of the 22nd Chinese National Conference on Computational Linguistics, pages 600-610, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.604, + 0.906, + 0.644 + ], + "angle": 0, + "content": "[54] Kaiyang Zhou, Jingkang Yang, Chen Change Loy, and Ziwei Liu. Learning to prompt for vision-language models. IJCV, 130(9):2337-2348, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.646, + 0.906, + 0.702 + ], + "angle": 0, + "content": "[55] Deyao Zhu, Jun Chen, Xiaoqian Shen, Xiang Li, and Mohamed Elhoseiny. Minigpt-4: Enhancing vision-language understanding with advanced large language models. arXiv preprint arXiv:2304.10592, 2023. 1, 2" + }, + { + "type": "list", + "bbox": [ + 0.516, + 0.092, + 0.906, + 0.702 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "title", + "bbox": [ + 0.104, + 0.086, + 0.895, + 0.131 + ], + "angle": 0, + "content": "ClearSight: Visual Signal Enhancement for Object Hallucination Mitigation in Multimodal Large Language Models" + }, + { + "type": "text", + "bbox": [ + 0.382, + 0.142, + 0.615, + 0.163 + ], + "angle": 0, + "content": "Supplementary Material" + }, + { + "type": "title", + "bbox": [ + 0.09, + 0.179, + 0.391, + 0.196 + ], + "angle": 0, + "content": "8. Additional Experimental Results" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.202, + 0.484, + 0.307 + ], + "angle": 0, + "content": "Sec. 8.1 presents the additional experimental results across all tasks in the MME benchmark. Sec. 8.2 details the experimental outcomes on the three datasets within the POPE benchmark. Sec. 8.3 compares the inference speeds and memory usage of various methods on ScienceQA and Nocaps. Sec. 8.4 highlights case studies of the VAF method on the LLaVA-Bench dataset." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.179, + 0.857, + 0.196 + ], + "angle": 0, + "content": "8.1. Detailed Experimental Results on MME" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.202, + 0.906, + 0.308 + ], + "angle": 0, + "content": "Fig. 9 and Fig. 10 present the performance of the LLaVA model family on perception-related tasks within the MME benchmark. Models utilizing the VAF method demonstrate significantly better performance compared to those employing the VCD method. Notably, VAF achieves consistent leadership across all tasks with the LLaVA-v1.5-13B model, likely due to its ability to balance attention between" + }, + { + "type": "image", + "bbox": [ + 0.159, + 0.334, + 0.806, + 0.57 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.577, + 0.908, + 0.606 + ], + "angle": 0, + "content": "Figure 9. Performance of LLaVA-v1.5-7B model on perception-related tasks in the MME Benchmark. VAF consistently achieved the highest scores across nearly all perception tasks." 
+ }, + { + "type": "image", + "bbox": [ + 0.158, + 0.625, + 0.806, + 0.862 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.868, + 0.908, + 0.898 + ], + "angle": 0, + "content": "Figure 10. Performance of LLaVA-v1.5-13B model on perception-related tasks in the MME Benchmark. VAF consistently achieved the highest scores across nearly all perception tasks." + } + ], + [ + { + "type": "image", + "bbox": [ + 0.159, + 0.095, + 0.805, + 0.331 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.338, + 0.908, + 0.368 + ], + "angle": 0, + "content": "Figure 11. Performance of the LLaVA-v1.5-7B model on cognition-related tasks in the MME Benchmark. The VAF method delivers a slight performance improvement compared to the degradation observed with the VCD method." + }, + { + "type": "image", + "bbox": [ + 0.158, + 0.386, + 0.806, + 0.624 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.63, + 0.908, + 0.659 + ], + "angle": 0, + "content": "Figure 12. Performance of the LLaVA-v1.5-13B model on cognition-related tasks in the MME Benchmark. The VAF method delivers a slight performance improvement compared to the degradation observed with the VCD method." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.685, + 0.483, + 0.716 + ], + "angle": 0, + "content": "visual and language modalities, ensuring generated content aligns more closely with visual inputs." + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.72, + 0.483, + 0.901 + ], + "angle": 0, + "content": "Fig. 11 and Fig. 12 illustrate the performance of LLaVA model family on cognition-related tasks within the MME benchmark. The application of the VCD method significantly impaired the model's performance on these tasks, likely due to its disruptive effect on linguistic priors. In contrast, VAF method not only avoided such negative impacts but also resulted in a slight performance improvement. This improvement is attributed to VAF's ability to precisely resolve the model's tendency to overlook visual features during the critical fusion stage, facilitating better integration of visual information while preserving its effective use of linguistic information." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.684, + 0.86, + 0.701 + ], + "angle": 0, + "content": "8.2. Detailed Experimental Results on POPE" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.72, + 0.907, + 0.903 + ], + "angle": 0, + "content": "Tab. 6 and Tab. 9 summarize the experimental results of the LLaVA-v.15 model family on the MSCOCO, A-OKVQA, and GQA datasets within the POPE benchmark. The results highlight that our approach consistently delivers more stable and significantly improved hallucination suppression compared to the VCD method. This advantage stems from our direct enhancement of attention to visual features during the modality fusion process, enabling balanced outputs across both visual and linguistic modalities. In contrast, the VCD method relies on suppressing language priors to indirectly enhance attention to visual information. Decoding method employed in all experiments utilizes greedy search." + } + ], + [ + { + "type": "table", + "bbox": [ + 0.214, + 0.088, + 0.797, + 0.584 + ], + "angle": 0, + "content": "
Dataset | Category | Method | Accuracy | Precision | Recall | F1-score
MSCOCO | Random | Regular | 88.2 | 94.2 | 81.5 | 87.4
MSCOCO | Random | VCD | 88.5 | 94.4 | 81.8 | 87.6
MSCOCO | Random | VAF | 89.8 | 92.9 | 86.2 | 89.4
MSCOCO | Popular | Regular | 86.1 | 89.9 | 81.5 | 85.5
MSCOCO | Popular | VCD | 86.3 | 90.0 | 81.7 | 85.8
MSCOCO | Popular | VAF | 87.5 | 88.6 | 86.2 | 87.4
MSCOCO | Adversarial | Regular | 82.3 | 82.9 | 81.3 | 82.1
MSCOCO | Adversarial | VCD | 82.3 | 82.9 | 81.6 | 82.4
MSCOCO | Adversarial | VAF | 83.4 | 86.8 | 78.9 | 82.6
A-OKVQA | Random | Regular | 87.6 | 87.6 | 87.7 | 87.6
A-OKVQA | Random | VCD | 87.7 | 87.8 | 87.6 | 87.8
A-OKVQA | Random | VAF | 89.4 | 91.7 | 86.6 | 89.1
A-OKVQA | Popular | Regular | 81.9 | 78.4 | 87.7 | 82.8
A-OKVQA | Popular | VCD | 82.1 | 78.5 | 87.9 | 83.1
A-OKVQA | Popular | VAF | 84.2 | 82.6 | 86.6 | 84.6
A-OKVQA | Adversarial | Regular | 74.3 | 68.8 | 87.7 | 77.1
A-OKVQA | Adversarial | VCD | 72.4 | 68.0 | 87.4 | 76.7
A-OKVQA | Adversarial | VAF | 77.2 | 72.9 | 86.6 | 79.2
GQA | Random | Regular | 88.0 | 87.1 | 89.3 | 88.2
GQA | Random | VCD | 88.6 | 87.4 | 89.5 | 88.8
GQA | Random | VAF | 89.5 | 90.8 | 88.0 | 89.4
GQA | Popular | Regular | 79.4 | 74.4 | 89.3 | 81.1
GQA | Popular | VCD | 79.9 | 74.6 | 89.5 | 81.7
GQA | Popular | VAF | 81.8 | 78.3 | 88.0 | 82.9
GQA | Adversarial | Regular | 76.3 | 70.6 | 89.3 | 78.9
GQA | Adversarial | VCD | 75.2 | 70.2 | 89.9 | 78.3
GQA | Adversarial | VAF | 79.7 | 75.4 | 88.0 | 81.2
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.595, + 0.907, + 0.625 + ], + "angle": 0, + "content": "Table 6. Experimental results of LLaVA-1.5-7B model on POPE. VAF method achieves the most effective hallucination suppression across all three datasets. For emphasis, the highest scores in each setting are highlighted in red." + }, + { + "type": "table", + "bbox": [ + 0.177, + 0.638, + 0.828, + 0.772 + ], + "angle": 0, + "content": "
Model | Method | Accuracy | Total Time | GPU-Memory | Latency/Example
LLaVA-v1.5-7B | Regular | 88.2 | 5:32 | 14.5G | 0.111s
LLaVA-v1.5-7B | VCD | 88.5 | 10:31 | 15.7G | 0.210s
LLaVA-v1.5-7B | VAF | 89.8 | 5:48 | 14.5G | 0.116s
LLaVA-v1.5-13B | Regular | 88.4 | 8:39 | 26.7G | 0.173s
LLaVA-v1.5-13B | VCD | 88.6 | 19:38 | 27.8G | 0.392s
LLaVA-v1.5-13B | VAF | 90.2 | 8:45 | 26.7G | 0.175s
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.784, + 0.907, + 0.813 + ], + "angle": 0, + "content": "Table 7. A comparison of inference speed and GPU memory usage for different methods applied to the LLaVA-v1.5 model family on POPE benchmark. Results with the slowest inference speed and highest memory usage are highlighted in red." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.838, + 0.377, + 0.856 + ], + "angle": 0, + "content": "8.3. Comparison of Inference Speeds" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.871, + 0.484, + 0.903 + ], + "angle": 0, + "content": "Tab. 7 and Tab. 8 assess the impact of various methods on the LLaVA-v1.5 model family, focusing on inference speed" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.839, + 0.907, + 0.901 + ], + "angle": 0, + "content": "and GPU memory usage. The results indicate that VCD significantly slows down inference, whereas our proposed method has a minimal effect. Furthermore, our method introduces no additional GPU memory requirements, in con" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.09, + 0.092, + 0.486, + 0.155 + ], + "angle": 0, + "content": "trast to VCD, which incurs substantial GPU memory overhead. This efficiency is achieved because our approach eliminates the need for extra processing of contrastive inputs, thereby significantly reducing computational over" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.092, + 0.909, + 0.138 + ], + "angle": 0, + "content": "head. All experiments were performed on a server equipped with a single A800 80G GPU, employing greedy search as the decoding strategy." + }, + { + "type": "table", + "bbox": [ + 0.177, + 0.173, + 0.83, + 0.31 + ], + "angle": 0, + "content": "
Model | Method | Accuracy | Total Time | GPU-Memory | Latency/Example
LLaVA-v1.5-7B | Regular | 68.0 | 0:36:39 | 14.5G | 0.488s
LLaVA-v1.5-7B | VCD | 64.5 | 1:18:47 | 15.7G | 1.058s
LLaVA-v1.5-7B | VAF | 68.5 | 0:36:41 | 14.5G | 0.489s
LLaVA-v1.5-13B | Regular | 71.6 | 0:45:20 | 26.7G | 0.604s
LLaVA-v1.5-13B | VCD | 70.0 | 1:46:59 | 27.8G | 1.426s
LLaVA-v1.5-13B | VAF | 71.7 | 0:48:24 | 26.7G | 0.645s
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.32, + 0.907, + 0.351 + ], + "angle": 0, + "content": "Table 8. A comparison of inference speed and GPU memory usage for different methods applied to the LLaVA-v1.5 model family on Nocaps benchmark. Results with the slowest inference speed and highest memory usage are highlighted in red." + }, + { + "type": "table", + "bbox": [ + 0.212, + 0.362, + 0.797, + 0.859 + ], + "angle": 0, + "content": "
Dataset | Category | Method | Accuracy | Precision | Recall | F1-score
MSCOCO | Random | Regular | 88.4 | 94.6 | 81.6 | 87.6
MSCOCO | Random | VCD | 88.6 | 95.0 | 81.8 | 87.7
MSCOCO | Random | VAF | 90.2 | 94.2 | 85.6 | 89.7
MSCOCO | Popular | Regular | 86.9 | 91.3 | 81.6 | 86.2
MSCOCO | Popular | VCD | 87.0 | 91.4 | 82.0 | 86.4
MSCOCO | Popular | VAF | 88.4 | 90.6 | 85.6 | 88.0
MSCOCO | Adversarial | Regular | 83.4 | 84.9 | 81.4 | 83.1
MSCOCO | Adversarial | VCD | 83.7 | 85.1 | 81.7 | 83.1
MSCOCO | Adversarial | VAF | 84.5 | 83.8 | 85.5 | 84.7
A-OKVQA | Random | Regular | 88.0 | 88.8 | 87.1 | 87.9
A-OKVQA | Random | VCD | 88.2 | 89.2 | 87.5 | 87.9
A-OKVQA | Random | VAF | 89.4 | 91.4 | 86.8 | 89.1
A-OKVQA | Popular | Regular | 83.9 | 81.7 | 87.1 | 84.3
A-OKVQA | Popular | VCD | 84.2 | 81.7 | 87.3 | 84.3
A-OKVQA | Popular | VAF | 86.0 | 85.4 | 86.8 | 86.1
A-OKVQA | Adversarial | Regular | 76.0 | 71.0 | 87.1 | 78.2
A-OKVQA | Adversarial | VCD | 76.4 | 71.2 | 87.1 | 78.3
A-OKVQA | Adversarial | VAF | 78.2 | 74.1 | 86.8 | 79.9
GQA | Random | Regular | 88.3 | 87.8 | 89.0 | 88.4
GQA | Random | VCD | 88.3 | 88.1 | 89.3 | 88.5
GQA | Random | VAF | 89.7 | 87.8 | 92.2 | 89.9
GQA | Popular | Regular | 83.3 | 79.8 | 89.0 | 84.1
GQA | Popular | VCD | 83.2 | 80.0 | 89.2 | 84.1
GQA | Popular | VAF | 85.2 | 83.0 | 88.6 | 85.7
GQA | Adversarial | Regular | 78.5 | 73.3 | 89.0 | 80.4
GQA | Adversarial | VCD | 78.7 | 73.3 | 88.9 | 80.3
GQA | Adversarial | VAF | 80.8 | 76.6 | 88.6 | 82.1
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.868, + 0.907, + 0.898 + ], + "angle": 0, + "content": "Table 9. Experimental results of LLaVA-1.5-13B model on POPE. VAF method achieves the most effective hallucination suppression across all three datasets. For emphasis, the highest scores in each setting are highlighted in red." + } + ], + [ + { + "type": "image", + "bbox": [ + 0.189, + 0.103, + 0.326, + 0.203 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.346, + 0.135, + 0.457, + 0.172 + ], + "angle": 0, + "content": "Prompt: How many uncut fruits are in the image?" + }, + { + "type": "image", + "bbox": [ + 0.522, + 0.103, + 0.659, + 0.202 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.68, + 0.135, + 0.79, + 0.171 + ], + "angle": 0, + "content": "Prompt: How many coffee mugs are in the set?" + }, + { + "type": "image_caption", + "bbox": [ + 0.208, + 0.227, + 0.424, + 0.253 + ], + "angle": 0, + "content": "Vanilla Decoding: There are five uncut fruits in the image." + }, + { + "type": "image_caption", + "bbox": [ + 0.543, + 0.228, + 0.761, + 0.253 + ], + "angle": 0, + "content": "Vanilla Decoding: There are four coffee mugs in the set." + }, + { + "type": "image_caption", + "bbox": [ + 0.208, + 0.287, + 0.426, + 0.313 + ], + "angle": 0, + "content": "Visual Contrastive Decoding: There are four uncut fruits in the image." + }, + { + "type": "image_caption", + "bbox": [ + 0.543, + 0.287, + 0.758, + 0.313 + ], + "angle": 0, + "content": "Visual Contrastive Decoding: There are three coffee mugs in the set." + }, + { + "type": "image_caption", + "bbox": [ + 0.208, + 0.347, + 0.424, + 0.372 + ], + "angle": 0, + "content": "Visual Amplification Fusion: There are three uncut fruits in the image." + }, + { + "type": "image_caption", + "bbox": [ + 0.543, + 0.347, + 0.758, + 0.372 + ], + "angle": 0, + "content": "Visual Amplification Fusion: There are three coffee mugs in the set." + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.405, + 0.907, + 0.434 + ], + "angle": 0, + "content": "Figure 13. An illustration of VAF correcting hallucinations on LLaVA-Bench, with a focus on numerical perception tasks. Correct outputs are highlighted in green, while incorrect ones are marked in red." + }, + { + "type": "image", + "bbox": [ + 0.188, + 0.461, + 0.374, + 0.596 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.415, + 0.468, + 0.632, + 0.481 + ], + "angle": 0, + "content": "Prompt: What is unusual about this image?" + }, + { + "type": "text", + "bbox": [ + 0.421, + 0.504, + 0.769, + 0.593 + ], + "angle": 0, + "content": "Vanilla Decoding: The unusual aspect of this image is that a man is ironing clothes while standing on the back of a moving yellow taxi. This is not a typical scenario, as ironing clothes is usually done indoors, in a more controlled environment. The man's actions pose a risk to both himself and the passengers of the taxi, as he could lose his balance and fall, causing injury or damage to the vehicle. Additionally, the presence of a washing machine in the back of the taxi is also unconventional, as it is not a common feature in taxis." + }, + { + "type": "text", + "bbox": [ + 0.421, + 0.616, + 0.768, + 0.716 + ], + "angle": 0, + "content": "Visual Contrastive Decoding: An unconventional aspect of the image is the presence of a man standing on the back of a yellow car while holding multiple clothes, seemingly ironing them. 
Ironing is typically done in a more controlled and stationary environment, such as a home or an ironing board in an indoor setting. However, in this case, the man is in a busy city driving a yellow taxi, which defies the traditional approach to ironing and creates an unusual scene. It may draw the attention of onlookers and passersby due to the uncommon behavior, making this image memorable." + }, + { + "type": "text", + "bbox": [ + 0.421, + 0.74, + 0.768, + 0.807 + ], + "angle": 0, + "content": "Visual Amplification Fusion: The unusual aspect of this image is that a man is ironing clothes while sitting on the back of a moving yellow taxi. This is not a typical scenario, as ironing clothes is generally done in a more controlled environment, such as a home or a laundromat. The man's position on the back of the taxi, combined with the act of ironing, creates an unconventional and unexpected scene." + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.842, + 0.907, + 0.871 + ], + "angle": 0, + "content": "Figure 14. An illustration of VAF correcting hallucinations on LLaVA-Bench, with a focus on complex reasoning tasks. Correct outputs are highlighted in green, while incorrect ones are marked in red." + } + ], + [ + { + "type": "image", + "bbox": [ + 0.227, + 0.101, + 0.393, + 0.222 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.424, + 0.108, + 0.59, + 0.119 + ], + "angle": 0, + "content": "Prompt: Describe this photo in detail." + }, + { + "type": "text", + "bbox": [ + 0.429, + 0.142, + 0.74, + 0.222 + ], + "angle": 0, + "content": "Vanilla Decoding: The image features a painting of a dog wearing a Renaissance-style dress, giving it a human-like appearance. The dog is sitting in a chair, looking directly at the viewer. The painting is quite detailed, capturing the dog's facial expression and the intricate design of the dress. In the background, there is a person partially visible on the left side of the painting. The overall scene is quite captivating and unique, as it combines the artistic style of the Renaissance with the subject matter of a dog." + }, + { + "type": "text", + "bbox": [ + 0.43, + 0.244, + 0.74, + 0.294 + ], + "angle": 0, + "content": "Visual Contrastive Decoding: The image features a unique painting on a canvas, depicting a portrait of a bulldog in Renaissance style, giving it a Renaissance era appearance. In this painting, the bulldog is presented as a woman, complete with a veil on its head, looking directly to its right." + }, + { + "type": "text", + "bbox": [ + 0.43, + 0.294, + 0.74, + 0.353 + ], + "angle": 0, + "content": "The painting also includes several elements of landscape and architecture, giving a sense of a real setting. Apart from the central dog figure, there are three smaller figures or figures in the background, adding more depth and life to the painting. In the forefront, a chair is visible, positioned to the left of the dog, further enhancing the painting's depiction of a realistic scene." + }, + { + "type": "text", + "bbox": [ + 0.429, + 0.375, + 0.738, + 0.415 + ], + "angle": 0, + "content": "Visual Amplification Fusion: The image features a painting of a dog wearing a Renaissance-style dress, giving it a human-like appearance. The dog is positioned in the center of the painting, and appears to be staring at the viewer." + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.441, + 0.907, + 0.471 + ], + "angle": 0, + "content": "Figure 15. 
An illustration of VAF correcting hallucinations on LLaVA-Bench, with a focus on image description tasks. Correct outputs are highlighted in green, while incorrect ones are marked in red." + }, + { + "type": "image", + "bbox": [ + 0.227, + 0.497, + 0.391, + 0.615 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.424, + 0.502, + 0.59, + 0.513 + ], + "angle": 0, + "content": "Prompt: Describe this photo in detail." + }, + { + "type": "text", + "bbox": [ + 0.431, + 0.537, + 0.741, + 0.626 + ], + "angle": 0, + "content": "Vanilla Decoding: The image features a beautiful display of three ripe and juicy passion fruit, cut in half to reveal their exquisite white meat inside. The ripe fruit is placed in a wooden bowl, making them the focal point of the image. Each fruit half rests in a different location on the table, showcasing the various sizes of these unique fruit-like berries. The close-up view of the cut open fruits and the table setting creates an appetizing scene, highlighting the natural color of the passion fruit and its appeal to anyone who enjoys a fresh and tangy experience with a hint of exotic taste." + }, + { + "type": "text", + "bbox": [ + 0.432, + 0.647, + 0.741, + 0.746 + ], + "angle": 0, + "content": "Visual Contrastive Decoding: The image features a wooden table with a basket of fruit, including a variety of fruits such as apples, oranges, and bananas. There are three apples on the table, with one placed near the left side, another in the middle, and the third one on the right side. In addition to the apples, there are two oranges on the table, one located in the middle and the other towards the right side. A banana can be seen on the left side of the table, and a cluster of grapes is also present, with some grapes placed near the center and others on the right side of the table. The assortment of fruits creates a colorful and appetizing display." + }, + { + "type": "text", + "bbox": [ + 0.434, + 0.764, + 0.741, + 0.824 + ], + "angle": 0, + "content": "Visual Amplification Fusion: The image features a wooden table with a basket of four purple fruits, possibly eggplants or figs, placed on it. The fruits are cut open, revealing their inwards. The fruits are arranged in a way that they are sitting on top of each other, with one fruit on the left side, another in the middle, and the third on the right side of the table. The fourth fruit is placed at the edge of the table." + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.849, + 0.907, + 0.879 + ], + "angle": 0, + "content": "Figure 16. Additional example of VAF correcting model hallucinations on LLaVA-Bench, primarily focusing on image description tasks. Correct outputs are highlighted in green, while incorrect ones are marked in red." + } + ], + [ + { + "type": "image", + "bbox": [ + 0.097, + 0.092, + 0.473, + 0.31 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.23, + 0.318, + 0.34, + 0.331 + ], + "angle": 0, + "content": "(a) Accuracy Metric" + }, + { + "type": "image", + "bbox": [ + 0.529, + 0.092, + 0.898, + 0.31 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.659, + 0.318, + 0.768, + 0.331 + ], + "angle": 0, + "content": "(b) F1-Score Metirc" + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.342, + 0.908, + 0.385 + ], + "angle": 0, + "content": "Figure 17. The Effect of Enhancing Visual Attention at Different Layers on Prediction Accuracy. 
This experiment, conducted with the LLaVA-v1.5-7B model on the COCO-Random dataset within the POPE Benchmark, demonstrates that enhancing attention to visual features in the model's middle layers significantly reduces hallucinations." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.411, + 0.35, + 0.427 + ], + "angle": 0, + "content": "8.4. Case study on LLaVA-Bench" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.433, + 0.484, + 0.539 + ], + "angle": 0, + "content": "Fig. 13, Fig. 14, Fig. 15, and Fig. 16 illustrate the effectiveness of various methods in mitigating model hallucinations on LLaVA-Bench. Across tasks such as numerical perception, image description, and complex reasoning, our approach demonstrates consistently superior performance in suppressing hallucinations. Experiments are conducted using LLaVA-v1.5-7B model." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.554, + 0.35, + 0.57 + ], + "angle": 0, + "content": "9. Additional Ablation Studies" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.58, + 0.484, + 0.687 + ], + "angle": 0, + "content": "In Sec. 9.1, we examine how enhancing attention to visual features at different levels affects hallucination suppression. In Sec. 9.2, we analyze the influence of varying the suppression coefficient \\(\\beta\\) on mitigating hallucinations. Finally, in Sec. 9.3, we evaluate the performance of the VAF method in suppressing hallucinations under various sampling strategies." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.697, + 0.454, + 0.714 + ], + "angle": 0, + "content": "9.1. Effect of Enhancement at Different Layers" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.72, + 0.485, + 0.901 + ], + "angle": 0, + "content": "We enhanced attention to visual features in layers 0-5, 10-15, and 20-25. Fig. 17 demonstrates the impact of enhancing visual attention at different layers. Notably, enhancing attention in the middle layers significantly reduces hallucination, while modifications in the shallow and deep layers have minimal effect on the generation results. As discussed in Sec. 4.1, this is because the model primarily integrates modality information in the middle layers. Thus, enhancing the focus on visual features during this phase is crucial for effectively mitigating hallucination. Experiments are conducted using LLaVA-v1.5-7B model on COCO-Random dataset from the POPE Benchmark." + }, + { + "type": "image", + "bbox": [ + 0.543, + 0.413, + 0.88, + 0.613 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.513, + 0.627, + 0.908, + 0.684 + ], + "angle": 0, + "content": "Figure 18. The effect of the suppression coefficient \\(\\beta\\) on the VAF method's ability to mitigate model hallucinations. The experiments were performed using the LLaVA-v1.5-7B model on the COCO-Random dataset from the POPE Benchmark." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.712, + 0.803, + 0.728 + ], + "angle": 0, + "content": "9.2. Effect of Suppression Coefficient" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.735, + 0.907, + 0.903 + ], + "angle": 0, + "content": "We assessed the effect of the suppression coefficient \\(\\beta\\) on the performance of the VAF method using the LLaVA-v1.5-7B model on the COCO-Random dataset within the POPE Benchmark. In our experiments, \\(\\alpha\\) was fixed at 0.15, while \\(\\beta\\) was systematically adjusted. The results, presented in Fig. 
18, reveal that when \\(0 < \\beta < 0.15\\), VAF significantly enhanced its ability to suppress hallucinations in the model. This improvement is likely due to VAF reducing redundant attention to system prompts in this range, thereby reinforcing focus on visual features and enabling generated content to better align with the visual input. Conversely," + } + ], + [ + { + "type": "table", + "bbox": [ + 0.234, + 0.089, + 0.773, + 0.341 + ], + "angle": 0, + "content": "
Sampling Strategy | Method | Accuracy | Precision | Recall | F1-Score
Greedy | Regular | 88.2 | 94.4 | 81.4 | 87.4
Greedy | VAF | 89.8 | 92.9 | 86.2 | 89.4
Direct Sampling | Regular | 82.9 | 90.4 | 71.3 | 80.9
Direct Sampling | VAF | 83.9 | 90.6 | 80.9 | 85
Top P | Regular | 84.3 | 92.1 | 72.5 | 82.1
Top P | VAF | 85.7 | 89.6 | 82.4 | 85.9
Top K | Regular | 83.3 | 91.9 | 72.8 | 81.1
Top K | VAF | 85 | 88.3 | 81.9 | 84.9
Top K + Temp0.5 | Regular | 85.5 | 95.1 | 74.9 | 84.5
Top K + Temp0.5 | VAF | 86.7 | 91.2 | 83.4 | 87
Top K + Temp1.5 | Regular | 80.4 | 87.1 | 70.2 | 77.8
Top K + Temp1.5 | VAF | 82.1 | 86 | 78.2 | 81.9
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.352, + 0.907, + 0.395 + ], + "angle": 0, + "content": "Table 10. Effectiveness of the VAF method in mitigating model hallucination under different sampling strategies. The highest score in each setting is highlighted in red. Experiments were conducted using the LLaVA-v1.5-7B model on the COCO-Random dataset within the POPE Benchmark." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.42, + 0.484, + 0.512 + ], + "angle": 0, + "content": "when \\(\\beta > 0.15\\), the model's performance deteriorated. We hypothesize that this decline stems from excessive suppression of attention to system prompts, which disrupts the delicate balance required for effectively integrating multimodal information, ultimately leading to a degradation in overall performance." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.523, + 0.425, + 0.541 + ], + "angle": 0, + "content": "9.3. Effect of Different Sampling Strategies" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.547, + 0.484, + 0.653 + ], + "angle": 0, + "content": "We evaluated the effectiveness of the VAF method in mitigating model hallucination under different sampling strategies using the LLaVA-v1.5-7B model on the COCO-Random dataset from the POPE Benchmark. The experimental results, shown in Tab. 10, indicate that the VAF method significantly mitigates model hallucination across all sampling strategies." + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.669, + 0.36, + 0.687 + ], + "angle": 0, + "content": "10. Prompts for Different Tasks" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.695, + 0.484, + 0.743 + ], + "angle": 0, + "content": "POPE Dataset. In the POPE dataset, input template for the model is presented below, with the prompts highlighted in green and the image highlighted in red." + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.768, + 0.458, + 0.816 + ], + "angle": 0, + "content": "A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions." + }, + { + "type": "title", + "bbox": [ + 0.117, + 0.821, + 0.236, + 0.835 + ], + "angle": 0, + "content": "USER: IMAGE" + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.836, + 0.457, + 0.867 + ], + "angle": 0, + "content": "Is there a cow in the image? Please just answer yes or no." + }, + { + "type": "title", + "bbox": [ + 0.118, + 0.873, + 0.216, + 0.887 + ], + "angle": 0, + "content": "ASSISTANT:" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.42, + 0.907, + 0.467 + ], + "angle": 0, + "content": "Nocaps Datasets. In Nocaps and Flickr30k dataset, input template for the model is presented below, with prompts highlighted in green and image highlighted in red." + }, + { + "type": "text", + "bbox": [ + 0.539, + 0.491, + 0.881, + 0.537 + ], + "angle": 0, + "content": "A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions." + }, + { + "type": "title", + "bbox": [ + 0.54, + 0.543, + 0.659, + 0.557 + ], + "angle": 0, + "content": "USER: IMAGE" + }, + { + "type": "text", + "bbox": [ + 0.597, + 0.559, + 0.88, + 0.589 + ], + "angle": 0, + "content": "Provide a one-sentence caption for the provided image." 
+ }, + { + "type": "title", + "bbox": [ + 0.541, + 0.596, + 0.638, + 0.61 + ], + "angle": 0, + "content": "ASSISTANT:" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.637, + 0.907, + 0.684 + ], + "angle": 0, + "content": "Sci-VQA Dataset. In the Sci-VQA dataset, input template for the model is presented below, with the prompts highlighted in green and the image highlighted in red." + }, + { + "type": "text", + "bbox": [ + 0.539, + 0.708, + 0.881, + 0.755 + ], + "angle": 0, + "content": "A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions." + }, + { + "type": "title", + "bbox": [ + 0.541, + 0.761, + 0.659, + 0.774 + ], + "angle": 0, + "content": "USER: IMAGE" + }, + { + "type": "text", + "bbox": [ + 0.598, + 0.776, + 0.822, + 0.789 + ], + "angle": 0, + "content": "Context: Select the best answer." + }, + { + "type": "text", + "bbox": [ + 0.598, + 0.791, + 0.88, + 0.819 + ], + "angle": 0, + "content": "Which property do these three objects have in common?" + }, + { + "type": "text", + "bbox": [ + 0.598, + 0.821, + 0.803, + 0.836 + ], + "angle": 0, + "content": "A. shiny B. slippery C. opaque" + }, + { + "type": "text", + "bbox": [ + 0.598, + 0.837, + 0.88, + 0.866 + ], + "angle": 0, + "content": "Answer with the option's letter from the given choices directly." + }, + { + "type": "title", + "bbox": [ + 0.541, + 0.873, + 0.638, + 0.887 + ], + "angle": 0, + "content": "ASSISTANT:" + } + ] +] \ No newline at end of file diff --git a/data/2025/2503_13xxx/2503.13107/91b69939-46cd-4d75-af9c-6d9f6dccd58b_origin.pdf b/data/2025/2503_13xxx/2503.13107/91b69939-46cd-4d75-af9c-6d9f6dccd58b_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..bfb3fa19cba0c0ec87ee305bf2d0805028b87958 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13107/91b69939-46cd-4d75-af9c-6d9f6dccd58b_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:07fec3dc39bc7703d1121390a1b21b6141ff89566b7db4399afcb62b3aee9888 +size 1960855 diff --git a/data/2025/2503_13xxx/2503.13107/full.md b/data/2025/2503_13xxx/2503.13107/full.md new file mode 100644 index 0000000000000000000000000000000000000000..7456c870810a27c1fb95372dcdb53992a1035878 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13107/full.md @@ -0,0 +1,549 @@ +# ClearSight: Visual Signal Enhancement for Object Hallucination Mitigation in Multimodal Large Language Models + +Hao Yin Gunagzong Si Zilei Wang* + +University of Science and Technology of China + +{yinhnavi, guangzongsi}@mail.ustc.edu.cn, zlwang@ustc.edu.cn + +# Abstract + +Contrastive decoding strategies are widely used to mitigate object hallucinations in multimodal large language models (MLLMs). By reducing over-reliance on language priors, these strategies ensure that generated content remains closely grounded in visual inputs, producing contextually accurate outputs. Since contrastive decoding requires no additional training or external tools, it offers both computational efficiency and versatility, making it highly attractive. However, these methods present two main limitations: (1) bluntly suppressing language priors can compromise coherence and accuracy of generated content, and (2) processing contrastive inputs adds computational load, significantly slowing inference speed. 
To address these challenges, we propose Visual Amplification Fusion (VAF), a plug-and-play technique that enhances attention to visual signals within the model's middle layers, where modality fusion predominantly occurs. This approach enables more effective capture of visual features, reducing the model's bias toward the language modality. Experimental results demonstrate that VAF significantly reduces hallucinations across various MLLMs without affecting inference speed, while maintaining coherence and accuracy in generated outputs. The code is available at https://github.com/ustc-hyin/ClearSight.

# 1. Introduction

In recent years, MLLMs [8, 27, 33, 35, 54, 55] have achieved remarkable progress in the intersecting fields of computer vision and natural language processing, and have been widely applied to tasks such as image captioning and visual question answering. However, these models often encounter the issue of "object hallucination" [15, 29, 32, 37] in practical applications, where the generated textual descriptions do not match the actual objects in the image. This problem highlights an over-reliance on unimodal priors (especially language priors) [17, 48, 50, 53] during inference, posing potential risks in high-precision applications such as medical diagnosis [18, 46] and autonomous driving [9, 34, 39, 49].

To address object hallucination [1, 2, 7, 14], several contrastive decoding strategies have been introduced in recent years. Among these, the Visual Contrastive Decoding (VCD) method has shown promise in reducing hallucinations by contrasting output distributions from both original and perturbed visual inputs, thus mitigating the model's excessive reliance on language priors [16, 41]. Notably, contrastive decoding methods do not require additional training or external tools, offering both computational efficiency and versatility, which has garnered them significant attention. However, these methods present two main limitations:

# Limitations of Contrastive Decoding

- While reducing over-reliance on language priors, these methods may compromise the coherence and accuracy of generated content.
- Contrastive decoding necessitates separate processing of the original and contrastive inputs, which considerably increases inference time.

To address these shortcomings, we propose a training-free method that effectively reduces hallucinations without compromising content quality or inference speed. Our saliency analysis of the model's attention maps reveals that biases toward language in generated content do not arise from an overemphasis on language signals, but rather from insufficient attention to visual information during modality fusion. Based on this insight, we introduce a novel, plug-and-play technique to mitigate hallucinations: Visual Amplification Fusion (VAF).

Our analysis indicates that modality fusion in MLLMs primarily occurs within the middle layers. VAF specifically amplifies visual signals at these middle layers, enabling the model to capture more distinctive visual features during fusion, which in turn reduces false descriptions in the generated text. This technique not only strengthens the model's visual representations but also retains the beneficial influence of language priors, thus preserving content quality. Furthermore, by eliminating the need to process contrastive samples, VAF maintains inference speed.

Experimental results validate the effectiveness of the VAF method.
Across multiple object hallucination benchmarks, VAF demonstrated notable performance gains, with improvements of approximately $3\%$ on POPE and $7\%$ on MME. In terms of coherence and accuracy of generated responses, VCD caused a roughly $19\%$ decrease on NoCaps, while VAF maintained content quality without negative impacts. Additionally, VCD reduced inference speed by $50\%$, whereas VAF had virtually no effect on inference speed.

In summary, the main contributions are as follows:

- We identify the negative impacts of contrastive decoding methods on both the quality of generated content and model inference speed.
- We analyze the modality fusion mechanism in MLLMs, highlighting its insufficient attention to visual information.
- We introduce the VAF method, which effectively mitigates the object hallucination problem while maintaining inference speed, coherence, and accuracy.
- We demonstrate the significant performance improvements of the VAF method across multiple object hallucination benchmarks.

# 2. Related Work

# 2.1. Multimodal Large Language Models

The development of MLLMs [26, 36, 51, 52] has advanced from BERT-based decoders to LLM-based architectures [4, 11, 40, 43-45], enabling improved multimodal relationship capture [6, 10, 24, 25]. Models like BLIP-2 [27] and MiniGPT-4 [55] incorporate a Q-Former mechanism, which enhances the alignment between visual and textual inputs, allowing for more precise interactions across modalities. InstructBLIP [12] builds on this approach by adding task-specific instructions, which improve the model's understanding of context-sensitive visual semantics. LLaVA [33] and Qwen-VL [5] utilize simpler linear projection techniques that streamline the alignment process, resulting in improved overall performance on vision-language tasks. However, hallucination issues persist across MLLMs, posing a significant challenge that requires further research.

# 2.2. Contrastive Decoding Strategies

In recent years, Contrastive Decoding [19, 21, 22, 28] has emerged as a technique to improve generative model accuracy through contrastive judgment, widely employed to address hallucinations in generated content. For instance, Visual Contrastive Decoding (VCD) [23] contrasts output distributions derived from original and distorted visual inputs, effectively reducing the over-reliance on statistical bias and unimodal priors, two essential causes of object hallucinations. Similarly, Instruction Contrastive Decoding (ICD) [47] works by comparing distributions derived from standard and disrupted instructions, thereby removing hallucinated concepts from the original distribution. These contrastive methods help ground generated content closely in visual inputs, resulting in contextually accurate outputs. However, despite these advancements, contrastive decoding faces two primary limitations: slower inference speed and reduced coherence in generated content. To overcome these limitations, we propose the VAF method, which achieves effective hallucination reduction while preserving both inference speed and content coherence.

# 3. Preliminary and Motivation

In Sec. 3.1, we illustrate the working mechanism of contrastive decoding to mitigate hallucinations, using Visual Contrastive Decoding as an example. In Sec. 3.2, we analyze two main drawbacks of this approach: its potential to disrupt the coherence and accuracy of generated content, and its tendency to slow down model inference.

# 3.1. Contrastive Decoding
We consider an MLLM parametrized by $\theta$. The model takes as input a textual query $x$ and a visual input $v$, where $v$ provides contextual visual information to assist the model in generating a relevant response $y$ to the textual query. The response $y$ is sampled auto-regressively from the probability distribution conditioned on the query $x$ and the visual context $v$. Mathematically, this can be formulated as:

$$
y_t \sim p_\theta(y_t \mid v, x, y_{<t}) \propto \exp \operatorname{logit}_\theta(y_t \mid v, x, y_{<t}), \tag{1}
$$

where $y_t$ denotes the token at time step $t$, and $y_{<t}$ represents the sequence of generated tokens up to time step $(t-1)$.

To mitigate the issue of object hallucination in MLLMs, contrastive decoding techniques can be applied. Here, we present Visual Contrastive Decoding (VCD) as a representative approach, shown in Fig. 1. Specifically, given a textual query $x$ and a visual input $v$, the model generates two distinct output distributions: one conditioned on the original $v$ and the other on the distorted visual input $v'$, which is derived by applying pre-defined distortions (i.e., a Gaussian noise mask) to $v$. Then, a new contrastive probability distribution is computed by exploiting the differences between the two initially obtained distributions. The new contrastive distribution $p_{vcd}$ is formulated as:

$$
p_{vcd}(y \mid v, v', x) = \operatorname{softmax}\left[ \operatorname{logit}_\theta(y \mid v, x) + \alpha \cdot \left( \operatorname{logit}_\theta(y \mid v, x) - \operatorname{logit}_\theta(y \mid v', x) \right) \right], \tag{2}
$$

where larger $\alpha$ values indicate a stronger amplification of differences between the two distributions ($\alpha = 0$ reduces to regular decoding). Essentially, VCD serves as a corrective mechanism, reducing hallucinations by contrasting against a distribution predisposed to favoring them.

![](images/4758f95feb2921349b4687274e578b427177cad7402749623729eb7e539198b3.jpg)
Figure 1. Illustration of Visual Contrastive Decoding. The hallucinated object "Teacher" is suppressed by contrasting with an output distribution prone to hallucinations. This method has two main drawbacks: (1) additional processing of distorted visual inputs greatly increases inference time; (2) subtracting the language prior disrupts content coherence.

# 3.2. Limitations of Contrastive Decoding

As contrastive decoding methods do not require training or external tools, they offer high computational efficiency and generalizability, attracting significant attention in academia. However, these methods still have two major drawbacks: a reduction in the quality of generated content and slower inference speed.

![](images/ba9b2e85d431481e3a8e32ea539a3d303b45e8f405ca48188412fd89af77be26.jpg)
Figure 2. Impact of VCD on Model Performance. CIDEr scores are reported on the Nocaps benchmark, while Accuracy is presented for the ScienceQA benchmark. The use of VCD leads to a significant decline in model performance.
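To make the decoding step in Eq. (2) concrete, the sketch below shows one VCD step in PyTorch. It is a minimal illustration, not the authors' implementation: it assumes a HuggingFace-style multimodal model whose forward call accepts `input_ids` and `pixel_values` and returns `.logits`, and a simple Gaussian corruption stands in for the paper's noising procedure; the function name and default coefficients are likewise illustrative. Note the two forward passes per generated token, which is the source of the slowdown analyzed next.

```python
import torch

def vcd_next_token_probs(model, input_ids, pixel_values, alpha=1.0, noise_std=0.5):
    """One Visual Contrastive Decoding step (Eq. 2), sketched.

    Runs the model twice per token: once on the clean image v and once on a
    distorted copy v', then amplifies the difference of the two logit vectors.
    """
    # Distorted visual input v' (a simple Gaussian corruption for illustration).
    noisy_pixels = pixel_values + noise_std * torch.randn_like(pixel_values)

    # Two forward passes per step: this is what roughly doubles inference time.
    logits_clean = model(input_ids=input_ids, pixel_values=pixel_values).logits[:, -1, :]
    logits_noisy = model(input_ids=input_ids, pixel_values=noisy_pixels).logits[:, -1, :]

    # logit(y|v,x) + alpha * (logit(y|v,x) - logit(y|v',x));
    # alpha = 0 recovers regular decoding.
    contrastive = logits_clean + alpha * (logits_clean - logits_noisy)
    return torch.softmax(contrastive, dim=-1)
```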
While contrasting the logits of $p_\theta(y \mid v, x)$ and $p_\theta(y \mid v', x)$ can help reduce over-reliance on language priors and mitigate hallucination in MLLMs, as evidenced by a $4\%$ performance gain on the POPE benchmark using the VCD method, merely decreasing the influence of the language modality on the output distribution may undermine the coherence of the generated content, potentially leading to prediction errors. This issue is less pronounced in straightforward object hallucination tasks, where responses are limited to binary options such as "yes" or "no". However, in more complex tasks, including multiple-choice question answering and image captioning, the impact of contrastive decoding methods on content quality becomes more significant.

To verify this, we applied the VCD method to the LLaVA-v1.5-7B and LLaVA-v1.5-13B models, assessing their performance on the ScienceQA [38] and NoCaps benchmarks. As illustrated in Fig. 2, our findings reveal that, following the application of VCD, model performance decreased by $5\%$ on ScienceQA and by a considerable $45\%$ on NoCaps. These results suggest that in tasks requiring nuanced natural language generation, contrastive decoding methods can substantially impair content quality.
| Model | Method | ScienceQA | Nocaps |
| --- | --- | --- | --- |
| LLaVA-v1.5-7B | Regular | 0.141s | 0.456s |
| LLaVA-v1.5-7B | VCD | 0.293s | 1.086s |
| LLaVA-v1.5-13B | Regular | 0.222s | 0.602s |
| LLaVA-v1.5-13B | VCD | 0.459s | 1.372s |
+ +Table 1. Impact of VCD on Model Inference Speed. The table shows the average inference time per sample (in seconds) on the ScienceQA and Nocaps benchmarks. Applying the VCD method nearly doubled the inference time of the model. + +Contrastive decoding methods notably reduce inference speed because they require calculating the output distribution for additional contrastive samples. For instance, in VCD method, each visual input $v$ necessitates computing the logits of both $p_{\theta}(y \mid v, x)$ and $p_{\theta}(y \mid v', x)$ separately. This doubles the computation load during inference compared to vanilla decoding. We evaluated the inference speed of VCD versus vanilla decoding on ScienceQA. The experimental results, shown in Tab. 1, reveal that VCD's inference time is almost double that of vanilla decoding. + +# 4. Visual Neglect in Modal Fusion + +The primary objective of this section is to examine why MLLMs tend to rely excessively on language priors in their predictions. In Sec. 4.1, saliency analysis reveals that image tokens influence prediction outcomes mainly through interactions with instruction tokens within the middle layers. Sec. 4.2 then compares attention weights across different modalities, showing that the attention given to visual features is notably lower than that allocated to system prompts and user instructions. These findings indicate that visual information is often underutilized in the modality fusion process, resulting in an over-reliance on language priors. + +# 4.1. Mid-layer: Visual-Language Fusion + +To uncover why MLLMs tend to overly rely on language priors and overlook visual content in prediction, it is necessary first to clarify how the model utilizes visual modality information. This section explores the influence of the visual modality on prediction outcomes from the perspective of visual information interaction. + +We employ the saliency technique, a widely used interpretability tool, to highlight key token interactions within the attention mechanism. Following established practices, we utilize Taylor expansion to compute saliency scores for each element of the attention matrix: + +$$ +I _ {l} = \left| \sum_ {h} A _ {h, l} \odot \frac {\partial \mathcal {L} (x)}{\partial A _ {h , l}} \right|. \tag {3} +$$ + +Here, $A_{h,l}$ represents the attention matrix value for the $h$ -th attention head in the $l$ -th layer, $x$ denotes the input, and $\mathcal{L}(x)$ is the loss function of the task, e.g., the cross-entropy objective for question-answering tasks. The saliency matrix $I_{l}$ for the $l$ -th layer is obtained by averaging across all attention heads. The significance of information flow from the $j$ -th token to the $i$ -th token in MLLMs is represented by $I_{l}(i,j)$ . + +To draw a clearer picture of visual information flow in MLLMs, we introduce two quantitative metrics based on $I_{l}(i,j)$ , with a particular focus on the information interaction involving image tokens. The definitions of the two quantitative metrics follow below. + +$S_{vv}$ , measuring the importance of information flow among image tokens: + +$$ +S _ {v v} = \frac {\sum_ {(i , j) \in C _ {v v}} I _ {l} (i , j)}{\left| C _ {v v} \right|} \tag {4} +$$ + +$$ +C _ {v v} = \{(i, j): i, j \in \mathcal {V}, i \geq j \}. 
# 4.2. Attention Imbalance Across Modalities

Sec. 4.1 reveals that the middle layers facilitate crucial fusion, integrating visual and textual inputs into cross-modal semantic representations that drive final predictions. Accordingly, this section delves deeper into the attention paid to visual inputs throughout the modality fusion process.

![](images/5a8021e71dc27b5039408120018516907f081adc2b0b7a6b99e3f5b145ca2cf7.jpg)
Figure 4. Attention Distribution of Modal Information Across Model Layers. In the middle layers, the model allocates insufficient attention to visual features while disproportionately focusing on system prompts.

We define the attention allocation, $\lambda$, as the aggregate attention score assigned to a specific type of token within a single layer. Accordingly, the attention allocation for system prompts, visual features, and user instructions in the $l$-th layer can be computed as follows:

$$
\lambda_{sys}^{l} = \sum_{i \in \mathcal{T}} \sum_{j \in \mathcal{S}} A_{l}(i,j), \qquad
\lambda_{vis}^{l} = \sum_{i \in \mathcal{T}} \sum_{j \in \mathcal{V}} A_{l}(i,j), \qquad
\lambda_{ins}^{l} = \sum_{i \in \mathcal{T}} \sum_{j \in \mathcal{T}} A_{l}(i,j). \tag{6}
$$

In this context, $A_{l}$ represents the attention matrix averaged across all attention heads, while $\mathcal{S}$ represents the indices of system tokens. The measures $\lambda_{sys}^{l}$, $\lambda_{vis}^{l}$, and $\lambda_{ins}^{l}$ provide insight into the distribution of attention to different modalities across various layers, aiding in understanding the reasons for the underutilization of visual information during the modality fusion process.

The experimental setup aligns with that described in Sec. 4.1. Fig. 4 illustrates the allocation of attention to different modalities across the model's layers. In the middle layers, attention to visual features is markedly lower than that given to system prompts and user instructions. This suggests that during the critical process of modality fusion, the model's focus on visual input is insufficient. As a result, visual information is underutilized, leading to an output distribution skewed toward language priors.
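A minimal sketch of Eq. (6) follows, assuming per-layer attention tensors from a HuggingFace-style forward pass and hypothetical index lists `sys_idx`/`vis_idx`/`txt_idx` for $\mathcal{S}$, $\mathcal{V}$, and $\mathcal{T}$.

```python
import torch

def attention_allocation(attn_layer, sys_idx, vis_idx, txt_idx):
    """Sketch of Eq. (6): per-layer attention allocation by token type.

    attn_layer: attention tensor [heads, seq, seq] for one layer
                (e.g., from model(..., output_attentions=True)).
    """
    A = attn_layer.mean(dim=0)                 # average over heads
    lam_sys = A[txt_idx][:, sys_idx].sum()     # attention T -> S (system prompt)
    lam_vis = A[txt_idx][:, vis_idx].sum()     # attention T -> V (visual features)
    lam_ins = A[txt_idx][:, txt_idx].sum()     # attention T -> T (instructions)
    return lam_sys, lam_vis, lam_ins
```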
# 4.3. Insights

Based on the experimental results presented in Sec. 4.1 and Sec. 4.2, two significant conclusions can be drawn:

- The model performs the crucial fusion of visual and textual modalities in the middle layers, creating cross-modal semantic representations that drive the final predictions.
- During this critical fusion process, the model demonstrates inadequate attention to the visual modality.

These findings indicate that models fail to fully utilize visual information, resulting in an excessive dependence on language priors and, subsequently, the occurrence of hallucination phenomena.

# 5. Visual Amplification Fusion

Building on the insights presented in Sec. 4, we introduce a hallucination mitigation method called Visual Amplification Fusion (VAF). As illustrated in Fig. 5, this approach heightens attention to visual information during modality fusion, effectively reducing the excessive dependency on language priors and ensuring that the generated content is closely grounded in visual inputs.

# 5.1. Attention Redistribution

As outlined in Sec. 4, the model performs the crucial fusion of visual and textual modalities within the middle layers.

![](images/a79b173bab09191aa22706ca508242c6b90a6e6be41e3fc5b3e73e81ffc48739.jpg)
Figure 5. Illustration of the Visual Amplification Fusion Method. In the middle layers, we select attention heads highly responsive to visual information, amplifying their focus on visual features while reducing unnecessary attention to system prompts.

However, the attention allocated to visual modality information during this process remains insufficient. To address this, we adjust the attention weights in these layers to achieve a more balanced focus.

![](images/cd4f57fe7e81f9e8b3921d58b7979766e51164cd331cbd4c3783c2b76d61d9c5.jpg)
Figure 6. Effect of Enhanced Visual Attention on Hallucination Suppression. Increasing attention to visual features in the fusion process of the model's middle layers successfully reduces hallucinations, enabling the model to correct its grape color prediction from "green" to "red".

Let $A_{l,h}$ denote the attention matrix of the $h$-th attention head in the $l$-th layer, and $Z_{l,h}$ represent its corresponding attention score matrix, defined as:

$$
A_{l,h} = \operatorname{softmax}\left(Z_{l,h}\right). \tag{7}
$$

Our objective during the modality fusion process is to amplify the model's attention to visual features while curbing an overemphasis on system prompts. This adjustment facilitates improved integration of visual information and reduces over-reliance on language priors. To achieve this, we modify the attention score matrix in the middle layers (i.e., $8 < l < 15$) as follows:

$$
\hat{Z}_{l,h} = Z_{l,h} + \alpha \cdot M_{l,h}^{\text{enh}} \circ Z_{l,h} - \beta \cdot M_{l,h}^{\text{sup}} \circ Z_{l,h}. \tag{8}
$$

Here, $\alpha$ is the enhancement coefficient ($\alpha > 0$), where larger values indicate stronger amplification of visual attention. The suppression coefficient $\beta$ ($0 < \beta < 1$) determines the extent of attention suppression directed at system prompts. The enhancement and suppression mask matrices, $M_{l,h}^{\text{enh}}$ and $M_{l,h}^{\text{sup}}$ respectively, are defined to guide the modulation of attention elements:

$$
M_{l,h}^{\text{enh}}(i,j) = \mathbb{I}(i \in \mathcal{T},\ j \in \mathcal{V}), \qquad
M_{l,h}^{\text{sup}}(i,j) = \mathbb{I}(i \in \mathcal{T},\ j \in \mathcal{S}). \tag{9}
$$

These modifications optimize attention allocation by enhancing the model's focus on visual features during modality fusion and minimizing superfluous attention to system prompts. As illustrated in Fig. 6, preliminary analysis indicates that this approach effectively mitigates hallucination issues by promoting greater attention to visual information.
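A minimal PyTorch sketch of Eqs. (8)-(9) is shown below. It assumes access to the pre-softmax score matrix `Z` of one middle-layer head (e.g., via a forward hook) and the same hypothetical index lists as before; since $Z + \alpha M \circ Z = (1+\alpha)Z$ on masked entries, the update reduces to element-wise rescaling.

```python
import torch

def amplify_visual_scores(Z, sys_idx, vis_idx, txt_idx,
                          alpha: float = 0.15, beta: float = 0.1):
    """Sketch of Eqs. (8)-(9): rescale pre-softmax attention scores.

    Z: attention score matrix [seq, seq] of one head in a middle layer
       (the logits that feed the softmax in Eq. (7)).
    """
    rows = torch.tensor(txt_idx).unsqueeze(1)          # instruction tokens (i in T)
    Z_hat = Z.clone()
    # Enhancement mask: entries where instruction tokens attend to image
    # tokens are scaled by (1 + alpha).
    Z_hat[rows, torch.tensor(vis_idx)] *= (1.0 + alpha)
    # Suppression mask: entries where instruction tokens attend to
    # system-prompt tokens are scaled by (1 - beta).
    Z_hat[rows, torch.tensor(sys_idx)] *= (1.0 - beta)
    return Z_hat
```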
# 5.2. Visual Perception Restriction

Enhancing visual attention across all attention heads in the middle layers can be overly aggressive and may negatively impact content generation. To address this, we propose a selective enhancement strategy. Specifically, we identify and isolate the attention heads that exhibit higher sensitivity to visual information, which we term visual perception heads. We then restrict the visual attention enhancement to these visual perception heads, ensuring better utilization of visual information while maintaining overall model performance.

In the model, attention heads that allocate more attention to visual features demonstrate heightened sensitivity to visual information. Let $A_{l,h}$ represent the attention matrix of the $h$-th attention head in the $l$-th layer of the model, with its corresponding visual attention allocation denoted by $\lambda_{\mathrm{vis}}^{l,h}$. In each attention layer, we identify the attention heads whose visual attention allocation falls within the top $50\%$ and designate them as visual perception heads, subsequently redistributing their attention. The attention matrices of the remaining attention heads are kept unchanged.
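The head-selection step admits a short sketch as well, reusing the head-wise form of Eq. (6); as before, the index lists and function name are illustrative.

```python
import torch

def select_visual_perception_heads(attn_layer, vis_idx, txt_idx, top_frac=0.5):
    """Sketch of Sec. 5.2: pick the heads most sensitive to visual input.

    attn_layer: attention tensor [heads, seq, seq] for one middle layer.
    Returns indices of the heads whose visual allocation lambda_vis falls
    in the top `top_frac` fraction; only these heads are redistributed.
    """
    # lambda_vis per head: total attention from instruction tokens (T)
    # to image tokens (V), i.e. Eq. (6) computed head-wise.
    lam_vis = attn_layer[:, txt_idx][:, :, vis_idx].sum(dim=(1, 2))
    k = max(1, int(top_frac * attn_layer.shape[0]))
    return torch.topk(lam_vis, k).indices
```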
# 6. Experiment

This section demonstrates the effectiveness of the proposed VAF method in mitigating hallucinations. Sec. 6.1 outlines the experimental setup, detailing the evaluation benchmarks and VAF parameter configurations. Sec. 6.2 then presents the experimental results from three perspectives: reduction of hallucinations, coherence of generated content, and inference speed. Finally, Sec. 6.3 further verifies the contribution of each VAF component through ablation studies.

# 6.1. Experimental Settings

In Sec. 6.1.1, we present the selected datasets and evaluation metrics. Sec. 6.1.2 details the chosen MLLM backbone models, and Sec. 6.1.3 outlines the baseline settings.

# 6.1.1. Datasets & Evaluation Metrics

Polling-based Object Probing Evaluation (POPE). POPE [30] is a novel framework designed to evaluate object hallucinations in MLLMs. Departing from traditional caption-based approaches, POPE frames hallucination detection as a binary task by posing straightforward yes-or-no questions regarding the presence of specific objects in an image (e.g., "Is there a chair in the image?"). Performance on POPE is measured across four metrics: Accuracy, Precision, Recall, and F1 score, allowing for a thorough evaluation of hallucinations in MLLMs.

Multimodal Model Evaluation (MME). The MME [13] benchmark provides a comprehensive framework for evaluating MLLMs across both perceptual and cognitive dimensions. It consists of ten perception-oriented tasks and four cognition-oriented tasks, with model performance assessed through accuracy metrics. In addition to the full dataset, we leverage specific subsets, such as object existence and counting, to analyze object-level hallucinations, while the position and color subsets are employed to examine attribute-level hallucinations.
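Since POPE reduces evaluation to binary answers, its four metrics follow directly from the confusion counts; the snippet below is a minimal sketch of that mapping, treating "yes" as the positive class.

```python
def pope_metrics(preds, labels):
    """Sketch: POPE scores from binary yes/no answers."""
    pairs = list(zip(preds, labels))
    tp = sum(p == "yes" and l == "yes" for p, l in pairs)
    fp = sum(p == "yes" and l == "no" for p, l in pairs)
    fn = sum(p == "no" and l == "yes" for p, l in pairs)
    tn = sum(p == "no" and l == "no" for p, l in pairs)
    accuracy = (tp + tn) / len(pairs)
    precision = tp / (tp + fp) if tp + fp else 0.0
    recall = tp / (tp + fn) if tp + fn else 0.0
    f1 = 2 * precision * recall / (precision + recall) if precision + recall else 0.0
    return accuracy, precision, recall, f1
```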
| Category | Method | LLaVA-v1.5-7B Accuracy | LLaVA-v1.5-7B F1-score | LLaVA-v1.5-13B Accuracy | LLaVA-v1.5-13B F1-score | Qwen-VL-Chat-7B Accuracy | Qwen-VL-Chat-7B F1-score |
|---|---|---|---|---|---|---|---|
| Random | Regular | 87.8 ↑0.0 | 87.5 ↑0.0 | 87.6 ↑0.0 | 87.4 ↑0.0 | 88.2 ↑0.0 | 87.9 ↑0.0 |
| Random | VCD | 88.4 ↑0.6 | 87.7 ↑0.2 | 88.9 ↑1.3 | 87.8 ↑0.4 | 89.1 ↑0.9 | 88.4 ↑0.5 |
| Random | ICD | 88.1 ↑0.3 | 87.6 ↑0.1 | 88.1 ↑0.5 | 87.6 ↑0.2 | 88.9 ↑0.7 | 88.1 ↑0.2 |
| Random | VAF | 89.6 ↑1.8 | 89.3 ↑1.8 | 90.1 ↑2.5 | 89.9 ↑2.5 | 90.0 ↑1.8 | 89.7 ↑1.8 |
| Popular | Regular | 82.5 ↑0.0 | 83.2 ↑0.0 | 82.7 ↑0.0 | 84.1 ↑0.0 | 82.4 ↑0.0 | 83.1 ↑0.0 |
| Popular | VCD | 83.1 ↑0.6 | 84.1 ↑0.9 | 83.7 ↑1.0 | 85.1 ↑1.0 | 83.0 ↑0.6 | 84.1 ↑1.0 |
| Popular | ICD | 82.1 ↓0.4 | 82.9 ↓0.3 | 82.9 ↑0.2 | 84.3 ↑0.2 | 83.2 ↑0.8 | 84.5 ↑1.4 |
| Popular | VAF | 84.5 ↑2.0 | 84.9 ↑1.7 | 85.2 ↑2.5 | 86.4 ↑2.3 | 84.9 ↑2.5 | 85.1 ↑2.0 |
| Adversarial | Regular | 77.6 ↑0.0 | 79.4 ↑0.0 | 77.8 ↑0.0 | 79.5 ↑0.0 | 77.2 ↑0.0 | 78.9 ↑0.0 |
| Adversarial | VCD | 78.1 ↑0.5 | 79.6 ↑0.2 | 78.2 ↑0.4 | 79.7 ↑0.2 | 78.8 ↑1.6 | 80.1 ↑1.2 |
| Adversarial | ICD | 78.5 ↑0.9 | 79.9 ↑0.5 | 79.1 ↑1.3 | 80.1 ↑0.6 | 78.1 ↑0.9 | 79.2 ↑0.3 |
| Adversarial | VAF | 80.1 ↑2.5 | 81.0 ↑1.6 | 80.7 ↑2.9 | 81.7 ↑2.2 | 80.4 ↑3.2 | 81.2 ↑2.3 |
Table 2. Performance on POPE. Results are averaged across the MS-COCO, A-OKVQA, and GQA datasets. The VAF method demonstrates superior hallucination suppression across all three MLLMs. The best performance for each setting is highlighted in red.
| Model | Method | Existence (object-level) | Count (object-level) | Position (attribute-level) | Color (attribute-level) | Total Score |
|---|---|---|---|---|---|---|
| LLaVA-v1.5-7B | Regular | 185.00 ↑0.00 | 146.67 ↑0.00 | 128.33 ↑0.00 | 150.00 ↑0.00 | 610.00 ↑0.00 |
| LLaVA-v1.5-7B | VCD | 185.00 ↑0.00 | 141.33 ↓5.34 | 128.33 ↑0.00 | 153.00 ↑3.00 | 607.66 ↓2.34 |
| LLaVA-v1.5-7B | ICD | 185.00 ↑0.00 | 148.33 ↑1.66 | 126.66 ↓1.67 | 148.33 ↓1.67 | 608.32 ↓1.68 |
| LLaVA-v1.5-7B | VAF | 195.00 ↑10.00 | 158.33 ↑11.66 | 128.33 ↑0.00 | 155.00 ↑5.00 | 636.67 ↑26.67 |
| LLaVA-v1.5-13B | Regular | 185.00 ↑0.00 | 155.00 ↑0.00 | 133.33 ↑0.00 | 165.00 ↑0.00 | 638.33 ↑0.00 |
| LLaVA-v1.5-13B | VCD | 185.00 ↑0.00 | 155.00 ↑0.00 | 130.00 ↓3.33 | 168.33 ↑3.33 | 638.33 ↑0.00 |
| LLaVA-v1.5-13B | ICD | 183.33 ↓1.67 | 153.33 ↓1.67 | 131.67 ↓1.66 | 165.00 ↑0.00 | 633.33 ↓5.00 |
| LLaVA-v1.5-13B | VAF | 195.00 ↑10.00 | 160.00 ↑5.00 | 136.67 ↑3.34 | 170.00 ↑5.00 | 661.67 ↑23.34 |
| Qwen-VL-7B | Regular | 158.33 ↑0.00 | 150.00 ↑0.00 | 128.33 ↑0.00 | 170.00 ↑0.00 | 606.66 ↑0.00 |
| Qwen-VL-7B | VCD | 158.33 ↑0.00 | 150.00 ↑0.00 | 133.33 ↑5.00 | 175.00 ↑5.00 | 616.66 ↑10.00 |
| Qwen-VL-7B | ICD | 128.33 ↓30.00 | 151.67 ↑1.67 | 128.33 ↑0.00 | 170.00 ↑0.00 | 578.33 ↓28.33 |
| Qwen-VL-7B | VAF | 165.00 ↑6.67 | 155.00 ↑5.00 | 133.33 ↑5.00 | 175.00 ↑5.00 | 628.33 ↑21.67 |
Table 3. Results on the MME subset. Across three MLLMs, the VAF method achieved the most effective suppression of both object-level and attribute-level hallucinations. The highest scores in each setting are highlighted in red.

Novel Object Captioning at Scale (Nocaps). The NoCaps [3] benchmark is designed to evaluate image captioning models on their ability to describe novel objects absent from standard datasets like COCO. Model performance is quantified using the CIDEr score, providing a basis to assess the coherence and accuracy of generated captions in response to images containing unfamiliar objects.

# 6.1.2. MLLM Backbones

In comparison to the Q-former structure, linear projection demonstrates greater efficiency in aligning visual and textual features. This advantage is evident in MLLMs with linear projection architectures, such as LLaVA and Qwen-VL, which outperform Q-former-based MLLMs like InstructBLIP and MiniGPT4. Based on these findings, we selected three linear-projection-based MLLMs, specifically LLaVA-v1.5-7B, LLaVA-v1.5-13B [35], and Qwen-VL-7B [5], to evaluate the effectiveness of our proposed VAF method. Detailed prompt templates for each model across various benchmarks are included in Sec. 10.

# 6.1.3. Baseline Settings

We primarily compared our approach to the VCD [23] and ICD [47] methods. VCD mitigates hallucinations by contrasting output distributions derived from original and distorted visual inputs, while ICD reduces hallucinated concepts by comparing distributions generated with standard versus disrupted instructions. To ensure consistency and reproducibility in our comparisons, all methods use greedy search. Unless specified otherwise, our experiments set $\beta = 0.1$ and $\alpha = 0.15$.

# 6.2. Results and Analysis

Sec. 6.2.1 examines the effectiveness of various methods in mitigating hallucinations, while Sec. 6.2.2 assesses their impact on the quality of generated content. Sec. 6.2.3 then analyzes the influence of each method on inference speed. Additional experimental results are provided in Sec. 8.

# 6.2.1. Hallucination Mitigation

Tab. 2 presents the experimental results of the VAF method on the POPE benchmark, with results averaged across the MSCOCO [31], A-OKVQA [42], and GQA [20] datasets. Applied to both the LLaVA-v1.5 model family and the Qwen-VL model, the VAF method consistently surpasses the VCD and ICD methods in reducing hallucinations. Tab. 3 further highlights the performance of VAF on the MME benchmark, demonstrating its effectiveness in suppressing both object-level and attribute-level hallucinations.

# 6.2.2. Coherence of Generated Content

Tab. 4 presents the experimental results for various methods on the Nocaps and ScienceQA datasets. It is evident that VCD and ICD substantially degrade the quality of the generated content. Specifically, on the Nocaps dataset, VCD and ICD reduce CIDEr scores by $18\%$ and $27\%$, respectively. This degradation primarily arises from the crude disruption of language priors by contrastive decoding methods, which leads to generated content lacking coherence and accuracy.
| Model | Decoding | ScienceQA (Accuracy) | Nocaps (CIDEr) |
|---|---|---|---|
| LLaVA-v1.5-7B | Regular | 68.0 | 78.7 |
| LLaVA-v1.5-7B | VCD | 64.5 | 65.7 |
| LLaVA-v1.5-7B | ICD | 62.4 | 62.3 |
| LLaVA-v1.5-7B | VAF | 68.5 | 78.8 |
| LLaVA-v1.5-13B | Regular | 71.6 | 82.6 |
| LLaVA-v1.5-13B | VCD | 70.0 | 68.9 |
| LLaVA-v1.5-13B | ICD | 69.2 | 60.3 |
| LLaVA-v1.5-13B | VAF | 71.7 | 82.3 |
Table 4. Results on the ScienceQA and Nocaps datasets. The highest and second-highest scores are marked in red and blue, respectively.

By contrast, our method demonstrates minimal negative impact on prediction results, maintaining both coherence and accuracy effectively.

# 6.2.3. Inference Speed

Fig. 7 illustrates the impact of different strategies on model inference speed within the Nocaps dataset. In comparison, the VCD and ICD methods nearly double the inference time due to the need to process contrastive input samples, whereas the VAF method has minimal impact on the inference speed of multimodal large language models.

![](images/a52934bd49c85c2eeb682f6e85fd76b33397436a158a1df077b2d7eddeb2b2c9.jpg)
Figure 7. Comparison of different strategies on inference speed. The VCD and ICD methods reduce inference speed by $50\%$, whereas the VAF method shows minimal impact.

# 6.3. Ablation Study

Ablation studies on the enhancement coefficient $\alpha$ were conducted using the COCO-Random dataset within the POPE benchmark to understand its influence on model performance. Fig. 8 demonstrates that when $0 < \alpha < 0.25$, model hallucinations are effectively suppressed. However, when $\alpha$ surpasses 0.25, performance starts to degrade. We propose that this reduction in performance may stem from an excessive focus on visual features, disrupting the balanced integration of language information and diminishing overall model effectiveness.

![](images/43f9cde0505695001d35da1fe6336854987e8cb1c5daf157a6cf91f7c5f9b531.jpg)
Figure 8. Ablation study of $\alpha$ on the POPE benchmark.

We performed ablation studies on the visual perception restriction mechanism, evaluating its impact on the POPE and Nocaps benchmarks. Tab. 5 highlights the effects of restricting attention reallocation to visual perception heads. Increasing attention to visual features alone reduces model hallucinations, while confining this reallocation strategy to visual perception heads minimizes adverse effects on content quality. More ablation studies can be found in Sec. 9.
| Model | Visual Restriction | POPE | Nocaps |
|---|---|---|---|
| LLaVA-7B | ✓ | 89.8 | 78.8 |
| LLaVA-7B | ✗ | 89.9 | 76.4 |
| LLaVA-13B | ✓ | 90.2 | 82.3 |
| LLaVA-13B | ✗ | 90.0 | 81.1 |
Table 5. Ablation Study of the Visual Perception Restriction Mechanism. Restricting attention redistribution to the visual perception heads more effectively preserves the quality of generated content.

# 7. Conclusion

In this paper, we identify two key drawbacks of using contrastive decoding to mitigate hallucinations in MLLMs: reduced quality of generated content and slower inference speed. To address these challenges, we propose a novel approach, Visual Amplification Fusion, which effectively mitigates hallucinations while preserving both inference speed and content generation quality. By enhancing the attention to visual features during modality fusion, VAF minimizes the over-reliance on language priors, ensuring a high degree of consistency between generated content and visual inputs. Extensive experiments across multiple benchmarks and MLLMs demonstrate that VAF provides a clear advantage in hallucination mitigation.

# Acknowledgements

This work is supported by the National Natural Science Foundation of China under Grant 62176246. This work is also supported by the Anhui Province Key Research and Development Plan (202304a05020045), the Anhui Province Natural Science Foundation (2208085UD17), and the National Natural Science Foundation of China under Grant 62406098.

# References

[1] Vedika Agarwal, Rakshith Shetty, and Mario Fritz. Towards causal vqa: Revealing and reducing spurious correlations by invariant and covariant semantic editing. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9690-9698, 2020. 1
[2] Aishwarya Agrawal, Dhruv Batra, and Devi Parikh. Analyzing the behavior of visual question answering models. arXiv preprint arXiv:1606.07356, 2016. 1
[3] Harsh Agrawal, Karan Desai, Yufei Wang, Xinlei Chen, Rishabh Jain, Mark Johnson, Dhruv Batra, Devi Parikh, Stefan Lee, and Peter Anderson. nocaps: novel object captioning at scale. In 2019 IEEE/CVF International Conference on Computer Vision (ICCV). IEEE, 2019. 7
[4] Jinze Bai, Shuai Bai, et al. Qwen technical report. arXiv preprint arXiv:2309.16609, 2023. 2
[5] Jinze Bai, Shuai Bai, et al. Qwen-vl: A frontier large vision-language model with versatile abilities. arXiv preprint arXiv:2308.12966, 2023. 2, 7
[6] Rohan Bavishi, Erich Elsen, et al. Introducing our multimodal models, 2023. 2
[7] Ali Furkan Biten, Lluís Gómez, and Dimosthenis Karatzas. Let there be a clock on the beach: Reducing object hallucination in image captioning. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 1381-1390, 2022. 1
[8] Keqin Chen, Zhao Zhang, et al. Shikra: Unleashing multimodal llm's referential dialogue magic. arXiv preprint arXiv:2306.15195, 2023. 1
[9] Long Chen, Oleg Sinavski, Jan Hunermann, Alice Karnsund, Andrew James Willmott, Danny Birch, Daniel Maund, and Jamie Shotton. Driving with llms: Fusing object-level vector modality for explainable autonomous driving. arXiv preprint arXiv:2310.01957, 2023. 1
[10] Zhe Chen, Weiyun Wang, et al. How far are we to gpt-4v? closing the gap to commercial multimodal models with open-source suites. arXiv preprint arXiv:2404.16821, 2024. 2
[11] Wei-Lin Chiang, Zhuohan Li, et al. Vicuna: An open-source chatbot impressing gpt-4 with $90\%$ chatgpt quality. See https://vicuna.lmsys.org (accessed 14 April 2023), 2023. 2
[12] Wenliang Dai, Junnan Li, et al. InstructBLIP: Towards general-purpose vision-language models with instruction tuning, 2023. 2
[13] Chaoyou Fu, Peixian Chen, et al.
Mme: A comprehensive evaluation benchmark for multimodal large language models. arXiv preprint arXiv:2306.13394, 2023. 6
[14] Yash Goyal, Tejas Khot, Douglas Summers-Stay, Dhruv Batra, and Devi Parikh. Making the v in vqa matter: Elevating the role of image understanding in visual question answering. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 6904-6913, 2017. 1
[15] Anisha Gunjal, Jihan Yin, and Erhan Bas. Detecting and preventing hallucinations in large vision language models. arXiv preprint arXiv:2308.06394, 2023. 1
[16] Vipul Gupta, Zhuowan Li, Adam Kortylewski, Chenyu Zhang, Yingwei Li, and Alan Yuille. Swapmix: Diagnosing and regularizing the over-reliance on visual context in visual question answering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5078-5088, 2022. 1
[17] Yudong Han, Liqiang Nie, Jianhua Yin, Jianlong Wu, and Yan Yan. Visual perturbation-aware collaborative learning for overcoming the language prior problem. arXiv preprint arXiv:2207.11850, 2022. 1
[18] Mingzhe Hu, Shaoyan Pan, Yuheng Li, and Xiaofeng Yang. Advancing medical imaging with language models: A journey from n-grams to chatgpt. arXiv preprint arXiv:2304.04920, 2023. 1
[19] Qidong Huang, Xiaoyi Dong, Pan Zhang, Bin Wang, Conghui He, Jiaqi Wang, Dahua Lin, Weiming Zhang, and Nenghai Yu. Opera: Alleviating hallucination in multimodal large language models via over-trust penalty and retrospection-allocation. In CVPR, pages 13418-13427, 2024. 2
[20] Drew A Hudson and Christopher D Manning. Gqa: A new dataset for real-world visual reasoning and compositional question answering. In CVPR, pages 6700-6709, 2019. 7
[21] Fushuo Huo, Wenchao Xu, Zhong Zhang, Haozhao Wang, Zhicheng Chen, and Peilin Zhao. Self-introspective decoding: Alleviating hallucinations for large vision-language models, 2024. 2
[22] Chaoya Jiang, Haiyang Xu, et al. Hallucination augmented contrastive learning for multimodal large language model. In CVPR, pages 27036-27046, 2024. 2
[23] Sicong Leng, Hang Zhang, et al. Mitigating object hallucinations in large vision-language models through visual contrastive decoding. In CVPR, pages 13872-13882, 2024. 2, 7
[24] Bo Li, Yuanhan Zhang, et al. Mimic-it: Multi-modal in-context instruction tuning. arXiv preprint arXiv:2306.05425, 2023. 2
[25] Bo Li, Kaichen Zhang, et al. Llava next: Stronger llms supercharge multimodal capabilities in the wild, 2024. 2
[26] Chunyuan Li, Cliff Wong, et al. Llava-med: Training a large language-and-vision assistant for biomedicine in one day. In NeurIPS, pages 28541-28564, 2023. 2
[27] Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. BLIP-2: Bootstrapping language-image pre-training with frozen image encoders and large language models. In ICML, 2023. 1, 2
[28] Xiang Lisa Li, Ari Holtzman, et al. Contrastive decoding: Open-ended text generation as optimization. arXiv preprint arXiv:2210.15097, 2022. 2
[29] Yifan Li, Yifan Du, Kun Zhou, Jinpeng Wang, Wayne Xin Zhao, and Ji-Rong Wen. Evaluating object hallucination in large vision-language models. arXiv preprint arXiv:2305.10355, 2023. 1
[30] Yifan Li, Yifan Du, Kun Zhou, Jinpeng Wang, Xin Zhao, and Ji-Rong Wen. Evaluating object hallucination in large vision-language models. In EMNLP, pages 292-305, 2023. 6
[31] Tsung-Yi Lin, Michael Maire, et al. Microsoft coco: Common objects in context. In ECCV, pages 740-755, 2014. 7
[32] Fuxiao Liu, Kevin Lin, Linjie Li, Jianfeng Wang, Yaser Yacoob, and Lijuan Wang. Mitigating hallucination in large multi-modal models via robust instruction tuning. arXiv preprint arXiv:2306.14565, 2023. 1
[33] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. In NeurIPS, pages 34892-34916, 2023. 1, 2
[34] Haokun Liu, Yaonan Zhu, Kenji Kato, Izumi Kondo, Tadayoshi Aoyama, and Yasuhisa Hasegawa. Lm-based human-robot collaboration framework for manipulation tasks. arXiv preprint arXiv:2308.14972, 2023. 1
[35] Haotian Liu, Chunyuan Li, Yuheng Li, and Yong Jae Lee. Improved baselines with visual instruction tuning. In CVPR, pages 26296-26306, 2024. 1, 7
[36] Zhi-Song Liu, Robin Courant, and Vicky Kalogeiton. Funnynet-w: Multimodal learning of funny moments in videos in the wild. International Journal of Computer Vision, pages 1-22, 2024. 2
[37] Holy Lovenia, Wenliang Dai, Samuel Cahyawijaya, Ziwei Ji, and Pascale Fung. Negative object presence evaluation (nope) to measure object hallucination in vision-language models. arXiv preprint arXiv:2310.05338, 2023. 1
[38] Pan Lu, Swaroop Mishra, Tony Xia, Liang Qiu, Kai-Wei Chang, Song-Chun Zhu, Oyvind Tafjord, Peter Clark, and Ashwin Kalyan. Learn to explain: Multimodal reasoning via thought chains for science question answering. In The 36th Conference on Neural Information Processing Systems (NeurIPS), 2022. 3
[39] Jinjie Mai, Jun Chen, Bing Li, Guocheng Qian, Mohamed Elhoseiny, and Bernard Ghanem. Llm as a robotic brain: Unifying egocentric memory and control. arXiv preprint arXiv:2304.09349, 2023. 1
[40] AI Meta. Introducing meta llama 3: The most capable openly available llm to date. Meta AI, 2024. 2
[41] Yulei Niu, Kaihua Tang, Hanwang Zhang, Zhiwu Lu, Xian-Sheng Hua, and Ji-Rong Wen. Counterfactual vqa: A cause-effect look at language bias. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12700-12710, 2021. 1
[42] Dustin Schwenk, Apoorv Khandelwal, et al. A-okvqa: A benchmark for visual question answering using world knowledge. In ECCV, pages 146-162, 2022. 7
[43] Rohan Taori, Ishaan Gulrajani, et al. Stanford alpaca: an instruction-following llama model (2023). URL https://github.com/tatsu-lab/stanford_alpaca, 1(9), 2023. 2
[44] Hugo Touvron, Thibaut Lavril, et al. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971, 2023.
[45] Hugo Touvron, Louis Martin, et al. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288, 2023. 2
[46] Sheng Wang, Zihao Zhao, Xi Ouyang, Qian Wang, and Dinggang Shen. Chatcad: Interactive computer-aided diagnosis on medical image using large language models. arXiv preprint arXiv:2302.07257, 2023. 1
[47] Xintong Wang, Jingheng Pan, et al. Mitigating hallucinations in large vision-language models with instruction contrastive decoding. arXiv preprint arXiv:2403.18715, 2024. 2, 7
[48] Yike Wu, Yu Zhao, Shiwan Zhao, Ying Zhang, Xiaojie Yuan, Guoqing Zhao, and Ning Jiang. Overcoming language priors in visual question answering via distinguishing superficially similar instances. arXiv preprint arXiv:2209.08529, 2022. 1
[49] Zhenyu Wu, Ziwei Wang, Xiuwei Xu, Jiwen Lu, and Haibin Yan. Embodied task planning with large language models. arXiv preprint arXiv:2307.01848, 2023. 1
[50] Hong Yan, Lijun Liu, Xupeng Feng, and Qingsong Huang. Overcoming language priors with self-contrastive learning for visual question answering.
Multimedia Tools and Applications, 82(11):16343-16358, 2023. 1
[51] Qinghao Ye, Haiyang Xu, et al. mplug-owl: Modularization empowers large language models with multimodality. arXiv preprint arXiv:2304.14178, 2023. 2
[52] Shilong Zhang, Peize Sun, et al. Gpt4roi: Instruction tuning large language model on region-of-interest. arXiv preprint arXiv:2307.03601, 2023. 2
[53] Zhibo Ren, Huizhen Wang, Muhua Zhu, Yichao Wang, Tong Xiao, and Jingbo Zhu. Overcoming language priors with counterfactual inference for visual question answering. In Proceedings of the 22nd Chinese National Conference on Computational Linguistics, pages 600-610, 2023. 1
[54] Kaiyang Zhou, Jingkang Yang, Chen Change Loy, and Ziwei Liu. Learning to prompt for vision-language models. IJCV, 130(9):2337-2348, 2022. 1
[55] Deyao Zhu, Jun Chen, Xiaoqian Shen, Xiang Li, and Mohamed Elhoseiny. Minigpt-4: Enhancing vision-language understanding with advanced large language models. arXiv preprint arXiv:2304.10592, 2023. 1, 2

# ClearSight: Visual Signal Enhancement for Object Hallucination Mitigation in Multimodal Large Language Models

Supplementary Material

# 8. Additional Experimental Results

Sec. 8.1 presents the additional experimental results across all tasks in the MME benchmark. Sec. 8.2 details the experimental outcomes on the three datasets within the POPE benchmark. Sec. 8.3 compares the inference speeds and memory usage of various methods on ScienceQA and Nocaps. Sec. 8.4 highlights case studies of the VAF method on the LLaVA-Bench dataset.

# 8.1. Detailed Experimental Results on MME

Fig. 9 and Fig. 10 present the performance of the LLaVA model family on perception-related tasks within the MME benchmark. Models utilizing the VAF method demonstrate significantly better performance compared to those employing the VCD method. Notably, VAF achieves consistent leadership across all tasks with the LLaVA-v1.5-13B model, likely due to its ability to balance attention between visual and language modalities, ensuring generated content aligns more closely with visual inputs.

![](images/11b3663bf0024698189fe6f3112ff21dcb6c23465d60d7948074f56300a00e95.jpg)
Figure 9. Performance of the LLaVA-v1.5-7B model on perception-related tasks in the MME Benchmark. VAF consistently achieved the highest scores across nearly all perception tasks.

![](images/451a93053e14534301efd50b796019652e06c5d881b0caef998aa921869931cd.jpg)
Figure 10. Performance of the LLaVA-v1.5-13B model on perception-related tasks in the MME Benchmark. VAF consistently achieved the highest scores across nearly all perception tasks.

![](images/d68d5414ee93abce830c76d8bad3c0dd6824d54150d5ca53159dc02a3792d344.jpg)
Figure 11. Performance of the LLaVA-v1.5-7B model on cognition-related tasks in the MME Benchmark. The VAF method delivers a slight performance improvement compared to the degradation observed with the VCD method.

![](images/2888d89da363d0b60fede2e6cffd441ac8299d0e420dbeb7c1d6e1be961f06d9.jpg)
Figure 12. Performance of the LLaVA-v1.5-13B model on cognition-related tasks in the MME Benchmark. The VAF method delivers a slight performance improvement compared to the degradation observed with the VCD method.

Fig. 11 and Fig. 12 illustrate the performance of the LLaVA model family on cognition-related tasks within the MME benchmark. The application of the VCD method significantly impaired the model's performance on these tasks, likely due to its disruptive effect on linguistic priors.
In contrast, the VAF method not only avoided such negative impacts but also resulted in a slight performance improvement. This improvement is attributed to VAF's ability to precisely resolve the model's tendency to overlook visual features during the critical fusion stage, facilitating better integration of visual information while preserving its effective use of linguistic information.

# 8.2. Detailed Experimental Results on POPE

Tab. 6 and Tab. 9 summarize the experimental results of the LLaVA-v1.5 model family on the MSCOCO, A-OKVQA, and GQA datasets within the POPE benchmark. The results highlight that our approach consistently delivers more stable and significantly improved hallucination suppression compared to the VCD method. This advantage stems from our direct enhancement of attention to visual features during the modality fusion process, enabling balanced outputs across both visual and linguistic modalities. In contrast, the VCD method relies on suppressing language priors to indirectly enhance attention to visual information. All experiments employ greedy search as the decoding strategy.
| Dataset | Category | Method | Accuracy | Precision | Recall | F1-score |
|---|---|---|---|---|---|---|
| MSCOCO | Random | Regular | 88.2 | 94.2 | 81.5 | 87.4 |
| MSCOCO | Random | VCD | 88.5 | 94.4 | 81.8 | 87.6 |
| MSCOCO | Random | VAF | 89.8 | 92.9 | 86.2 | 89.4 |
| MSCOCO | Popular | Regular | 86.1 | 89.9 | 81.5 | 85.5 |
| MSCOCO | Popular | VCD | 86.3 | 90.0 | 81.7 | 85.8 |
| MSCOCO | Popular | VAF | 87.5 | 88.6 | 86.2 | 87.4 |
| MSCOCO | Adversarial | Regular | 82.3 | 82.9 | 81.3 | 82.1 |
| MSCOCO | Adversarial | VCD | 82.3 | 82.9 | 81.6 | 82.4 |
| MSCOCO | Adversarial | VAF | 83.4 | 86.8 | 78.9 | 82.6 |
| A-OKVQA | Random | Regular | 87.6 | 87.6 | 87.7 | 87.6 |
| A-OKVQA | Random | VCD | 87.7 | 87.8 | 87.6 | 87.8 |
| A-OKVQA | Random | VAF | 89.4 | 91.7 | 86.6 | 89.1 |
| A-OKVQA | Popular | Regular | 81.9 | 78.4 | 87.7 | 82.8 |
| A-OKVQA | Popular | VCD | 82.1 | 78.5 | 87.9 | 83.1 |
| A-OKVQA | Popular | VAF | 84.2 | 82.6 | 86.6 | 84.6 |
| A-OKVQA | Adversarial | Regular | 74.3 | 68.8 | 87.7 | 77.1 |
| A-OKVQA | Adversarial | VCD | 72.4 | 68.0 | 87.4 | 76.7 |
| A-OKVQA | Adversarial | VAF | 77.2 | 72.9 | 86.6 | 79.2 |
| GQA | Random | Regular | 88.0 | 87.1 | 89.3 | 88.2 |
| GQA | Random | VCD | 88.6 | 87.4 | 89.5 | 88.8 |
| GQA | Random | VAF | 89.5 | 90.8 | 88.0 | 89.4 |
| GQA | Popular | Regular | 79.4 | 74.4 | 89.3 | 81.1 |
| GQA | Popular | VCD | 79.9 | 74.6 | 89.5 | 81.7 |
| GQA | Popular | VAF | 81.8 | 78.3 | 88.0 | 82.9 |
| GQA | Adversarial | Regular | 76.3 | 70.6 | 89.3 | 78.9 |
| GQA | Adversarial | VCD | 75.2 | 70.2 | 89.9 | 78.3 |
| GQA | Adversarial | VAF | 79.7 | 75.4 | 88.0 | 81.2 |
Table 6. Experimental results of the LLaVA-v1.5-7B model on POPE. The VAF method achieves the most effective hallucination suppression across all three datasets. For emphasis, the highest scores in each setting are highlighted in red.
| Model | Method | Accuracy | Total Time | GPU-Memory | Latency/Example |
|---|---|---|---|---|---|
| LLaVA-v1.5-7B | Regular | 88.2 | 5:32 | 14.5G | 0.111s |
| LLaVA-v1.5-7B | VCD | 88.5 | 10:31 | 15.7G | 0.210s |
| LLaVA-v1.5-7B | VAF | 89.8 | 5:48 | 14.5G | 0.116s |
| LLaVA-v1.5-13B | Regular | 88.4 | 8:39 | 26.7G | 0.173s |
| LLaVA-v1.5-13B | VCD | 88.6 | 19:38 | 27.8G | 0.392s |
| LLaVA-v1.5-13B | VAF | 90.2 | 8:45 | 26.7G | 0.175s |
Table 7. A comparison of inference speed and GPU memory usage for different methods applied to the LLaVA-v1.5 model family on the POPE benchmark. Results with the slowest inference speed and highest memory usage are highlighted in red.

# 8.3. Comparison of Inference Speeds

Tab. 7 and Tab. 8 assess the impact of various methods on the LLaVA-v1.5 model family, focusing on inference speed and GPU memory usage. The results indicate that VCD significantly slows down inference, whereas our proposed method has a minimal effect. Furthermore, our method introduces no additional GPU memory requirements, in contrast to VCD, which incurs substantial GPU memory overhead. This efficiency is achieved because our approach eliminates the need for extra processing of contrastive inputs, thereby significantly reducing computational overhead. All experiments were performed on a server equipped with a single A800 80G GPU, employing greedy search as the decoding strategy.
| Model | Method | Accuracy | Total Time | GPU-Memory | Latency/Example |
|---|---|---|---|---|---|
| LLaVA-v1.5-7B | Regular | 68.0 | 0:36:39 | 14.5G | 0.488s |
| LLaVA-v1.5-7B | VCD | 64.5 | 1:18:47 | 15.7G | 1.058s |
| LLaVA-v1.5-7B | VAF | 68.5 | 0:36:41 | 14.5G | 0.489s |
| LLaVA-v1.5-13B | Regular | 71.6 | 0:45:20 | 26.7G | 0.604s |
| LLaVA-v1.5-13B | VCD | 70.0 | 1:46:59 | 27.8G | 1.426s |
| LLaVA-v1.5-13B | VAF | 71.7 | 0:48:24 | 26.7G | 0.645s |
Table 8. A comparison of inference speed and GPU memory usage for different methods applied to the LLaVA-v1.5 model family on the Nocaps benchmark. Results with the slowest inference speed and highest memory usage are highlighted in red.
| Dataset | Category | Method | Accuracy | Precision | Recall | F1-score |
|---|---|---|---|---|---|---|
| MSCOCO | Random | Regular | 88.4 | 94.6 | 81.6 | 87.6 |
| MSCOCO | Random | VCD | 88.6 | 95.0 | 81.8 | 87.7 |
| MSCOCO | Random | VAF | 90.2 | 94.2 | 85.6 | 89.7 |
| MSCOCO | Popular | Regular | 86.9 | 91.3 | 81.6 | 86.2 |
| MSCOCO | Popular | VCD | 87.0 | 91.4 | 82.0 | 86.4 |
| MSCOCO | Popular | VAF | 88.4 | 90.6 | 85.6 | 88.0 |
| MSCOCO | Adversarial | Regular | 83.4 | 84.9 | 81.4 | 83.1 |
| MSCOCO | Adversarial | VCD | 83.7 | 85.1 | 81.7 | 83.1 |
| MSCOCO | Adversarial | VAF | 84.5 | 83.8 | 85.5 | 84.7 |
| A-OKVQA | Random | Regular | 88.0 | 88.8 | 87.1 | 87.9 |
| A-OKVQA | Random | VCD | 88.2 | 89.2 | 87.5 | 87.9 |
| A-OKVQA | Random | VAF | 89.4 | 91.4 | 86.8 | 89.1 |
| A-OKVQA | Popular | Regular | 83.9 | 81.7 | 87.1 | 84.3 |
| A-OKVQA | Popular | VCD | 84.2 | 81.7 | 87.3 | 84.3 |
| A-OKVQA | Popular | VAF | 86.0 | 85.4 | 86.8 | 86.1 |
| A-OKVQA | Adversarial | Regular | 76.0 | 71.0 | 87.1 | 78.2 |
| A-OKVQA | Adversarial | VCD | 76.4 | 71.2 | 87.1 | 78.3 |
| A-OKVQA | Adversarial | VAF | 78.2 | 74.1 | 86.8 | 79.9 |
| GQA | Random | Regular | 88.3 | 87.8 | 89.0 | 88.4 |
| GQA | Random | VCD | 88.3 | 88.1 | 89.3 | 88.5 |
| GQA | Random | VAF | 89.7 | 87.8 | 92.2 | 89.9 |
| GQA | Popular | Regular | 83.3 | 79.8 | 89.0 | 84.1 |
| GQA | Popular | VCD | 83.2 | 80.0 | 89.2 | 84.1 |
| GQA | Popular | VAF | 85.2 | 83.0 | 88.6 | 85.7 |
| GQA | Adversarial | Regular | 78.5 | 73.3 | 89.0 | 80.4 |
| GQA | Adversarial | VCD | 78.7 | 73.3 | 88.9 | 80.3 |
| GQA | Adversarial | VAF | 80.8 | 76.6 | 88.6 | 82.1 |
Table 9. Experimental results of the LLaVA-v1.5-13B model on POPE. The VAF method achieves the most effective hallucination suppression across all three datasets. For emphasis, the highest scores in each setting are highlighted in red.

![](images/a65c83187bf6f743d3ed6f698d34b951a870192e87a0223814be4491c320a2c8.jpg)
Prompt: How many uncut fruits are in the image?
Vanilla Decoding: There are five uncut fruits in the image.
Visual Contrastive Decoding: There are four uncut fruits in the image.
Visual Amplification Fusion: There are three uncut fruits in the image.

![](images/1a904075e7cad436ffd0ce8802753c74ac8c0ab0e5717e2ca6c9dfb9b77aa3ea.jpg)
Prompt: How many coffee mugs are in the set?
Vanilla Decoding: There are four coffee mugs in the set.
Visual Contrastive Decoding: There are three coffee mugs in the set.
Visual Amplification Fusion: There are three coffee mugs in the set.

![](images/d96465a44578b92b95d86a245007c48c89608d96e2b641f2c2af97ae5787d3c4.jpg)
Figure 13. An illustration of VAF correcting hallucinations on LLaVA-Bench, with a focus on numerical perception tasks. Correct outputs are highlighted in green, while incorrect ones are marked in red.

Figure 14. An illustration of VAF correcting hallucinations on LLaVA-Bench, with a focus on complex reasoning tasks. Correct outputs are highlighted in green, while incorrect ones are marked in red.

Prompt: What is unusual about this image?

Vanilla Decoding: The unusual aspect of this image is that a man is ironing clothes while standing on the back of a moving yellow taxi. This is not a typical scenario, as ironing clothes is usually done indoors, in a more controlled environment. The man's actions pose a risk to both himself and the passengers of the taxi, as he could lose his balance and fall, causing injury or damage to the vehicle. Additionally, the presence of a washing machine in the back of the taxi is also unconventional, as it is not a common feature in taxis.

Visual Contrastive Decoding: An unconventional aspect of the image is the presence of a man standing on the back of a yellow car while holding multiple clothes, seemingly ironing them. Ironing is typically done in a more controlled and stationary environment, such as a home or an ironing board in an indoor setting. However, in this case, the man is in a busy city driving a yellow taxi, which defies the traditional approach to ironing and creates an unusual scene. It may draw the attention of onlookers and passersby due to the uncommon behavior, making this image memorable.

Visual Amplification Fusion: The unusual aspect of this image is that a man is ironing clothes while sitting on the back of a moving yellow taxi. This is not a typical scenario, as ironing clothes is generally done in a more controlled environment, such as a home or a laundromat. The man's position on the back of the taxi, combined with the act of ironing, creates an unconventional and unexpected scene.

![](images/5e53b49c44c6e0501a0e4aa3ecd37b74a84fb37345c7e5c3fb0250cb93d4c76f.jpg)
Prompt: Describe this photo in detail.

Vanilla Decoding: The image features a painting of a dog wearing a Renaissance-style dress, giving it a human-like appearance. The dog is sitting in a chair, looking directly at the viewer. The painting is quite detailed, capturing the dog's facial expression and the intricate design of the dress. In the background, there is a person partially visible on the left side of the painting.
The overall scene is quite captivating and unique, as it combines the artistic style of the Renaissance with the subject matter of a dog.

Visual Contrastive Decoding: The image features a unique painting on a canvas, depicting a portrait of a bulldog in Renaissance style, giving it a Renaissance era appearance. In this painting, the bulldog is presented as a woman, complete with a veil on its head, looking directly to its right.

The painting also includes several elements of landscape and architecture, giving a sense of a real setting. Apart from the central dog figure, there are three smaller figures in the background, adding more depth and life to the painting. In the forefront, a chair is visible, positioned to the left of the dog, further enhancing the painting's depiction of a realistic scene.

Visual Amplification Fusion: The image features a painting of a dog wearing a Renaissance-style dress, giving it a human-like appearance. The dog is positioned in the center of the painting, and appears to be staring at the viewer.

![](images/88514ab10ec1b0add4b169678541b00295fc37deda1c2ba83f677b1b7cc5202d.jpg)
Figure 15. An illustration of VAF correcting hallucinations on LLaVA-Bench, with a focus on image description tasks. Correct outputs are highlighted in green, while incorrect ones are marked in red.

Prompt: Describe this photo in detail.

Vanilla Decoding: The image features a beautiful display of three ripe and juicy passion fruit, cut in half to reveal their exquisite white meat inside. The ripe fruit is placed in a wooden bowl, making them the focal point of the image. Each fruit half rests in a different location on the table, showcasing the various sizes of these unique fruit-like berries. The close-up view of the cut open fruits and the table setting creates an appetizing scene, highlighting the natural color of the passion fruit and its appeal to anyone who enjoys a fresh and tangy experience with a hint of exotic taste.

Visual Contrastive Decoding: The image features a wooden table with a basket of fruit, including a variety of fruits such as apples, oranges, and bananas. There are three apples on the table, with one placed near the left side, another in the middle, and the third one on the right side. In addition to the apples, there are two oranges on the table, one located in the middle and the other towards the right side. A banana can be seen on the left side of the table, and a cluster of grapes is also present, with some grapes placed near the center and others on the right side of the table. The assortment of fruits creates a colorful and appetizing display.

Visual Amplification Fusion: The image features a wooden table with a basket of four purple fruits, possibly eggplants or figs, placed on it. The fruits are cut open, revealing their inwards. The fruits are arranged in a way that they are sitting on top of each other, with one fruit on the left side, another in the middle, and the third on the right side of the table. The fourth fruit is placed at the edge of the table.

Figure 16. Additional example of VAF correcting model hallucinations on LLaVA-Bench, primarily focusing on image description tasks. Correct outputs are highlighted in green, while incorrect ones are marked in red.

![](images/186b4777820d03711bd9a905a5233c8f91c9c5ca8d2c8ed43bec3792005c511b.jpg)
(a) Accuracy Metric

![](images/792b6994a955d9a584b4ffe2d499b18844b6c1008aa9d461b3041cfa70c08cb0.jpg)
(b) F1-Score Metric

# 8.4. Case Study on LLaVA-Bench

Fig. 13, Fig. 14, Fig. 15, and Fig. 16 illustrate the effectiveness of various methods in mitigating model hallucinations on LLaVA-Bench. Across tasks such as numerical perception, image description, and complex reasoning, our approach demonstrates consistently superior performance in suppressing hallucinations. Experiments are conducted using the LLaVA-v1.5-7B model.

# 9. Additional Ablation Studies

In Sec. 9.1, we examine how enhancing attention to visual features at different levels affects hallucination suppression. In Sec. 9.2, we analyze the influence of varying the suppression coefficient $\beta$ on mitigating hallucinations. Finally, in Sec. 9.3, we evaluate the performance of the VAF method in suppressing hallucinations under various sampling strategies.

# 9.1. Effect of Enhancement at Different Layers

We enhanced attention to visual features in layers 0-5, 10-15, and 20-25. Fig. 17 demonstrates the impact of enhancing visual attention at different layers. Notably, enhancing attention in the middle layers significantly reduces hallucination, while modifications in the shallow and deep layers have minimal effect on the generation results. As discussed in Sec. 4.1, this is because the model primarily integrates modality information in the middle layers. Thus, enhancing the focus on visual features during this phase is crucial for effectively mitigating hallucination. Experiments are conducted using the LLaVA-v1.5-7B model on the COCO-Random dataset from the POPE Benchmark.

![](images/ffcc6e9e895eff01f780f8bd05da7688628711fa34ba7e6cfa9ed07eea6550d6.jpg)
Figure 17. The Effect of Enhancing Visual Attention at Different Layers on Prediction Accuracy. This experiment, conducted with the LLaVA-v1.5-7B model on the COCO-Random dataset within the POPE Benchmark, demonstrates that enhancing attention to visual features in the model's middle layers significantly reduces hallucinations.

Figure 18. The effect of the suppression coefficient $\beta$ on the VAF method's ability to mitigate model hallucinations. The experiments were performed using the LLaVA-v1.5-7B model on the COCO-Random dataset from the POPE Benchmark.

# 9.2. Effect of Suppression Coefficient

We assessed the effect of the suppression coefficient $\beta$ on the performance of the VAF method using the LLaVA-v1.5-7B model on the COCO-Random dataset within the POPE Benchmark. In our experiments, $\alpha$ was fixed at 0.15, while $\beta$ was systematically adjusted. The results, presented in Fig. 18, reveal that when $0 < \beta < 0.15$, VAF significantly enhanced its ability to suppress hallucinations in the model. This improvement is likely due to VAF reducing redundant attention to system prompts in this range, thereby reinforcing focus on visual features and enabling generated content to better align with the visual input. Conversely, when $\beta > 0.15$, the model's performance deteriorated.
| Sampling Strategy | Method | Accuracy | Precision | Recall | F1-Score |
|---|---|---|---|---|---|
| Greedy | Regular | 88.2 | 94.4 | 81.4 | 87.4 |
| Greedy | VAF | 89.8 | 92.9 | 86.2 | 89.4 |
| Direct Sampling | Regular | 82.9 | 90.4 | 71.3 | 80.9 |
| Direct Sampling | VAF | 83.9 | 90.6 | 80.9 | 85.0 |
| Top P | Regular | 84.3 | 92.1 | 72.5 | 82.1 |
| Top P | VAF | 85.7 | 89.6 | 82.4 | 85.9 |
| Top K | Regular | 83.3 | 91.9 | 72.8 | 81.1 |
| Top K | VAF | 85.0 | 88.3 | 81.9 | 84.9 |
| Top K + Temp 0.5 | Regular | 85.5 | 95.1 | 74.9 | 84.5 |
| Top K + Temp 0.5 | VAF | 86.7 | 91.2 | 83.4 | 87.0 |
| Top K + Temp 1.5 | Regular | 80.4 | 87.1 | 70.2 | 77.8 |
| Top K + Temp 1.5 | VAF | 82.1 | 86.0 | 78.2 | 81.9 |
Table 10. Effectiveness of the VAF method in mitigating model hallucination under different sampling strategies. The highest score in each setting is highlighted in red. Experiments were conducted using the LLaVA-v1.5-7B model on the COCO-Random dataset within the POPE Benchmark.

We hypothesize that this decline stems from excessive suppression of attention to system prompts, which disrupts the delicate balance required for effectively integrating multimodal information, ultimately leading to a degradation in overall performance.

# 9.3. Effect of Different Sampling Strategies

We evaluated the effectiveness of the VAF method in mitigating model hallucination under different sampling strategies using the LLaVA-v1.5-7B model on the COCO-Random dataset from the POPE Benchmark. The experimental results, shown in Tab. 10, indicate that the VAF method significantly mitigates model hallucination across all sampling strategies.

# 10. Prompts for Different Tasks

POPE Dataset. In the POPE dataset, the input template for the model is presented below, with the prompt highlighted in green and the image highlighted in red.

A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.

# USER: IMAGE

Is there a cow in the image? Please just answer yes or no.

# ASSISTANT:

Nocaps Datasets. In the Nocaps and Flickr30k datasets, the input template for the model is presented below, with the prompt highlighted in green and the image highlighted in red.

A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.

# USER: IMAGE

Provide a one-sentence caption for the provided image.

# ASSISTANT:

Sci-VQA Dataset. In the Sci-VQA dataset, the input template for the model is presented below, with the prompt highlighted in green and the image highlighted in red.

A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.

# USER: IMAGE

Context: Select the best answer.

Which property do these three objects have in common?

A. shiny B. slippery C. opaque

Answer with the option's letter from the given choices directly.
+ +# ASSISTANT: \ No newline at end of file diff --git a/data/2025/2503_13xxx/2503.13107/images/0b87569274e178e6f9394d30e1099b4c2100d12bbd967c3a449cc44082e27c39.jpg b/data/2025/2503_13xxx/2503.13107/images/0b87569274e178e6f9394d30e1099b4c2100d12bbd967c3a449cc44082e27c39.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ec8e36cad266df94b6b8bf1767b7fa9cdfc769a5 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13107/images/0b87569274e178e6f9394d30e1099b4c2100d12bbd967c3a449cc44082e27c39.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d0b057b114400d5d945b58acbc994fa6e531ffd9317ebadaae7b6dce5469ecb9 +size 86600 diff --git a/data/2025/2503_13xxx/2503.13107/images/0f8cb9e1d9e4977cd45b50bbf47700154d6997e655db5eea76ff1883310ed170.jpg b/data/2025/2503_13xxx/2503.13107/images/0f8cb9e1d9e4977cd45b50bbf47700154d6997e655db5eea76ff1883310ed170.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7abc0de5edd5f8a9d6ba9414f983557aa96d40e0 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13107/images/0f8cb9e1d9e4977cd45b50bbf47700154d6997e655db5eea76ff1883310ed170.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c85ccebe266fb000231cc4731cf61117bd711076dc4a116b0c81294aa4d5fa7b +size 3997 diff --git a/data/2025/2503_13xxx/2503.13107/images/11b3663bf0024698189fe6f3112ff21dcb6c23465d60d7948074f56300a00e95.jpg b/data/2025/2503_13xxx/2503.13107/images/11b3663bf0024698189fe6f3112ff21dcb6c23465d60d7948074f56300a00e95.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4827322ba0b4c825ffe85823059a3397dad65d52 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13107/images/11b3663bf0024698189fe6f3112ff21dcb6c23465d60d7948074f56300a00e95.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7ce7624e2228ac4c1b33d4df5b63574403db7fddee329ceea973db895967e864 +size 66916 diff --git a/data/2025/2503_13xxx/2503.13107/images/186b4777820d03711bd9a905a5233c8f91c9c5ca8d2c8ed43bec3792005c511b.jpg b/data/2025/2503_13xxx/2503.13107/images/186b4777820d03711bd9a905a5233c8f91c9c5ca8d2c8ed43bec3792005c511b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4a3714a0afb308050febdd6743b72458c18ec23b --- /dev/null +++ b/data/2025/2503_13xxx/2503.13107/images/186b4777820d03711bd9a905a5233c8f91c9c5ca8d2c8ed43bec3792005c511b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:956382e9244657a32df334b6fbbc122fca58a9c43036f107d99d745b75bffb55 +size 38437 diff --git a/data/2025/2503_13xxx/2503.13107/images/1a904075e7cad436ffd0ce8802753c74ac8c0ab0e5717e2ca6c9dfb9b77aa3ea.jpg b/data/2025/2503_13xxx/2503.13107/images/1a904075e7cad436ffd0ce8802753c74ac8c0ab0e5717e2ca6c9dfb9b77aa3ea.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e830695b98546ce94abca29fd161b80c379ad5d1 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13107/images/1a904075e7cad436ffd0ce8802753c74ac8c0ab0e5717e2ca6c9dfb9b77aa3ea.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eb7b9c64f97a8197d1eb42a576adb8576c23567e3fb3022e6432f05baeaad27a +size 10445 diff --git a/data/2025/2503_13xxx/2503.13107/images/21e87c39a1185adaa1fa5daef642e19c09c6b7c37526b1af15163bfbeb932039.jpg b/data/2025/2503_13xxx/2503.13107/images/21e87c39a1185adaa1fa5daef642e19c09c6b7c37526b1af15163bfbeb932039.jpg new file mode 100644 index 0000000000000000000000000000000000000000..623d7bfcabe8c64054c15961a7aae639ba225286 --- /dev/null +++ 
b/data/2025/2503_13xxx/2503.13107/images/21e87c39a1185adaa1fa5daef642e19c09c6b7c37526b1af15163bfbeb932039.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1f81b323da6c31614ed5f8b31d021e3a7f142426a9990d61bd5706e5efb728d8 +size 3611 diff --git a/data/2025/2503_13xxx/2503.13107/images/2888d89da363d0b60fede2e6cffd441ac8299d0e420dbeb7c1d6e1be961f06d9.jpg b/data/2025/2503_13xxx/2503.13107/images/2888d89da363d0b60fede2e6cffd441ac8299d0e420dbeb7c1d6e1be961f06d9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2b7e8d8204e083937554a8c3c5c6e011da5505b9 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13107/images/2888d89da363d0b60fede2e6cffd441ac8299d0e420dbeb7c1d6e1be961f06d9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f7d8d40d9606b4588d6e72992195389c39b3bbddec5129a310d99745661d010d +size 49474 diff --git a/data/2025/2503_13xxx/2503.13107/images/3f193b68d405fb31bda95c7181340682375133c3e305b5f7afe5ebc6ac143304.jpg b/data/2025/2503_13xxx/2503.13107/images/3f193b68d405fb31bda95c7181340682375133c3e305b5f7afe5ebc6ac143304.jpg new file mode 100644 index 0000000000000000000000000000000000000000..33fdf7463670eab62d4c413123679ed38fbe5679 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13107/images/3f193b68d405fb31bda95c7181340682375133c3e305b5f7afe5ebc6ac143304.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7781fbb820192228e348a34688f86538e96365414025cd2c4f4f955fb1e153ac +size 4231 diff --git a/data/2025/2503_13xxx/2503.13107/images/43f9cde0505695001d35da1fe6336854987e8cb1c5daf157a6cf91f7c5f9b531.jpg b/data/2025/2503_13xxx/2503.13107/images/43f9cde0505695001d35da1fe6336854987e8cb1c5daf157a6cf91f7c5f9b531.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7ec3a3a088e2fb62b8ace724ca31304af5c48c78 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13107/images/43f9cde0505695001d35da1fe6336854987e8cb1c5daf157a6cf91f7c5f9b531.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:443a60752f08d296102809ecc5a92788c163dee1f0ea314ba437d5aa1991e389 +size 35273 diff --git a/data/2025/2503_13xxx/2503.13107/images/451a93053e14534301efd50b796019652e06c5d881b0caef998aa921869931cd.jpg b/data/2025/2503_13xxx/2503.13107/images/451a93053e14534301efd50b796019652e06c5d881b0caef998aa921869931cd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1d3afd4506780cae447ecc1746c4a51a1084cae2 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13107/images/451a93053e14534301efd50b796019652e06c5d881b0caef998aa921869931cd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bd12c692870d59799e2ac0478fc97628d7d068ff91d153db84906ce21ec9d4d6 +size 74143 diff --git a/data/2025/2503_13xxx/2503.13107/images/4758f95feb2921349b4687274e578b427177cad7402749623729eb7e539198b3.jpg b/data/2025/2503_13xxx/2503.13107/images/4758f95feb2921349b4687274e578b427177cad7402749623729eb7e539198b3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a33f0ac476545476050d23f7893efb4739396819 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13107/images/4758f95feb2921349b4687274e578b427177cad7402749623729eb7e539198b3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bce4e2e91f381810dcec36fcb3345d8d67f3f538c26db7d254a6026d465fa2e4 +size 48515 diff --git a/data/2025/2503_13xxx/2503.13107/images/591b4f3e08189d0211b7d6030a04e8d5102e8a900d3a71f513acb19ff213f9c2.jpg 
b/data/2025/2503_13xxx/2503.13107/images/591b4f3e08189d0211b7d6030a04e8d5102e8a900d3a71f513acb19ff213f9c2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2e1df3f7930a05ae8b5619e309f98cd96450cc8e --- /dev/null +++ b/data/2025/2503_13xxx/2503.13107/images/591b4f3e08189d0211b7d6030a04e8d5102e8a900d3a71f513acb19ff213f9c2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4b877cf14ee1831081be3218f9c31d093d51a1a08f0152d2761dcf32d8f72fc3 +size 23116 diff --git a/data/2025/2503_13xxx/2503.13107/images/5a8021e71dc27b5039408120018516907f081adc2b0b7a6b99e3f5b145ca2cf7.jpg b/data/2025/2503_13xxx/2503.13107/images/5a8021e71dc27b5039408120018516907f081adc2b0b7a6b99e3f5b145ca2cf7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b9943d6c0646f2430627f43f7a341f232aba0972 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13107/images/5a8021e71dc27b5039408120018516907f081adc2b0b7a6b99e3f5b145ca2cf7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:90e96a87eb30a1b7d642dd71b36517fcc05b9d4afd29946a00190446ce08bbae +size 52791 diff --git a/data/2025/2503_13xxx/2503.13107/images/5e53b49c44c6e0501a0e4aa3ecd37b74a84fb37345c7e5c3fb0250cb93d4c76f.jpg b/data/2025/2503_13xxx/2503.13107/images/5e53b49c44c6e0501a0e4aa3ecd37b74a84fb37345c7e5c3fb0250cb93d4c76f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a88f18bb1480d80149e51f512d3f41faaa499adb --- /dev/null +++ b/data/2025/2503_13xxx/2503.13107/images/5e53b49c44c6e0501a0e4aa3ecd37b74a84fb37345c7e5c3fb0250cb93d4c76f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:adfffe4334e4ddc9469461f1440842f91a94f7c3a51bef7805d968d7a46e2dc4 +size 12263 diff --git a/data/2025/2503_13xxx/2503.13107/images/703f7ad82282a916228916e145b95efd4e173a30ad5a5153fe76f0e5c7ac0718.jpg b/data/2025/2503_13xxx/2503.13107/images/703f7ad82282a916228916e145b95efd4e173a30ad5a5153fe76f0e5c7ac0718.jpg new file mode 100644 index 0000000000000000000000000000000000000000..396e9cf546f0ed8b81dc8d384143a5f8b15b562b --- /dev/null +++ b/data/2025/2503_13xxx/2503.13107/images/703f7ad82282a916228916e145b95efd4e173a30ad5a5153fe76f0e5c7ac0718.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e44776273893081cb33d04d3ac363636cf2b764fd712d855b7c7adf0ab4dfbf0 +size 51062 diff --git a/data/2025/2503_13xxx/2503.13107/images/733a7a722b6287fcc9fdac7f056498e59787768f80cf66a874f9f622f4187058.jpg b/data/2025/2503_13xxx/2503.13107/images/733a7a722b6287fcc9fdac7f056498e59787768f80cf66a874f9f622f4187058.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a70311da70ce1e6f48fa13da9466b19ef53094af --- /dev/null +++ b/data/2025/2503_13xxx/2503.13107/images/733a7a722b6287fcc9fdac7f056498e59787768f80cf66a874f9f622f4187058.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:61ce726100305e52cf7910a253f414243f0ed9ef8949124b6abe108bfcc6910e +size 152984 diff --git a/data/2025/2503_13xxx/2503.13107/images/7538f0c12c3655ed2dbe444565937bdcf67a420ed7cad16c765b0c24d4cd1f93.jpg b/data/2025/2503_13xxx/2503.13107/images/7538f0c12c3655ed2dbe444565937bdcf67a420ed7cad16c765b0c24d4cd1f93.jpg new file mode 100644 index 0000000000000000000000000000000000000000..34e8217d672c57d4bb685c0beb0b032a9e6d2a54 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13107/images/7538f0c12c3655ed2dbe444565937bdcf67a420ed7cad16c765b0c24d4cd1f93.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:328d8203dc204a701f9fc7995721c902c08550af6b6e49680a2bae10eb45122f +size 5020 diff --git a/data/2025/2503_13xxx/2503.13107/images/792b6994a955d9a584b4ffe2d499b18844b6c1008aa9d461b3041cfa70c08cb0.jpg b/data/2025/2503_13xxx/2503.13107/images/792b6994a955d9a584b4ffe2d499b18844b6c1008aa9d461b3041cfa70c08cb0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..78abd4bfe60172d8eb457d77b238e549e54be550 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13107/images/792b6994a955d9a584b4ffe2d499b18844b6c1008aa9d461b3041cfa70c08cb0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:636b672d7f383428ffc7aa001ddf9e817a3bded7ef6d4a3cce794be64ba39451 +size 32713 diff --git a/data/2025/2503_13xxx/2503.13107/images/7a443757c9f12c09f98302669fb4ab28d8f5b4293a012d1a4cd3b4d9d4bd0d9f.jpg b/data/2025/2503_13xxx/2503.13107/images/7a443757c9f12c09f98302669fb4ab28d8f5b4293a012d1a4cd3b4d9d4bd0d9f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..332c3d92fba3c776add7348c3b1924cf67617897 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13107/images/7a443757c9f12c09f98302669fb4ab28d8f5b4293a012d1a4cd3b4d9d4bd0d9f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:63f97573aea37df048dfe2e0f4d6217711975f49c8a2775b0fb49028f426c350 +size 59630 diff --git a/data/2025/2503_13xxx/2503.13107/images/88514ab10ec1b0add4b169678541b00295fc37deda1c2ba83f677b1b7cc5202d.jpg b/data/2025/2503_13xxx/2503.13107/images/88514ab10ec1b0add4b169678541b00295fc37deda1c2ba83f677b1b7cc5202d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..20dc2e335f0f9bbfaf6e0f17a441e7284602fa87 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13107/images/88514ab10ec1b0add4b169678541b00295fc37deda1c2ba83f677b1b7cc5202d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6559b12f12600a47278c7a916cec4e49683988e0c40dee99b20da53841c1a12b +size 14219 diff --git a/data/2025/2503_13xxx/2503.13107/images/8b5a7597a40413b0b625d92fa3f609bf4ba008a8e7e335f5c1d5bac48951892a.jpg b/data/2025/2503_13xxx/2503.13107/images/8b5a7597a40413b0b625d92fa3f609bf4ba008a8e7e335f5c1d5bac48951892a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..02ebea9a0c98b74687ee2e1448eecd1e7bb0a88a --- /dev/null +++ b/data/2025/2503_13xxx/2503.13107/images/8b5a7597a40413b0b625d92fa3f609bf4ba008a8e7e335f5c1d5bac48951892a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dc0bccc17a44ec484319d941c3f9e526b729224eeb1e50f940e1d105352d24da +size 139570 diff --git a/data/2025/2503_13xxx/2503.13107/images/8f13287a325cf42a2c71a691dba2a24b548f39cbbaa373328960b2f00f6ac7b9.jpg b/data/2025/2503_13xxx/2503.13107/images/8f13287a325cf42a2c71a691dba2a24b548f39cbbaa373328960b2f00f6ac7b9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a8d9a146fd984309d1351cb835d107b635003800 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13107/images/8f13287a325cf42a2c71a691dba2a24b548f39cbbaa373328960b2f00f6ac7b9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:23c5eaa05a556f24584d3cd79efda0d91e3b21958e9750d3f518193e52c75a3c +size 5133 diff --git a/data/2025/2503_13xxx/2503.13107/images/9892ea3780bed5aaca7927a6248d8fead913c8fd06d75a5b023f781636f42ee7.jpg b/data/2025/2503_13xxx/2503.13107/images/9892ea3780bed5aaca7927a6248d8fead913c8fd06d75a5b023f781636f42ee7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1fa85c0af30a9255d453d3188e00e0182236c2e7 --- /dev/null +++ 
b/data/2025/2503_13xxx/2503.13107/images/9892ea3780bed5aaca7927a6248d8fead913c8fd06d75a5b023f781636f42ee7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c7894d8d7d6733c51000bd36799acbbe7726bc5dcc00dff43854e4c022e1c3df +size 3909 diff --git a/data/2025/2503_13xxx/2503.13107/images/a52934bd49c85c2eeb682f6e85fd76b33397436a158a1df077b2d7eddeb2b2c9.jpg b/data/2025/2503_13xxx/2503.13107/images/a52934bd49c85c2eeb682f6e85fd76b33397436a158a1df077b2d7eddeb2b2c9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8380234fb8369543538ee6aceb07bc9aee6ae3bf --- /dev/null +++ b/data/2025/2503_13xxx/2503.13107/images/a52934bd49c85c2eeb682f6e85fd76b33397436a158a1df077b2d7eddeb2b2c9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:74b4f695db17661ecda0ab3f83fb7f21ee7762ca716988540017a78984e41a50 +size 27069 diff --git a/data/2025/2503_13xxx/2503.13107/images/a65c83187bf6f743d3ed6f698d34b951a870192e87a0223814be4491c320a2c8.jpg b/data/2025/2503_13xxx/2503.13107/images/a65c83187bf6f743d3ed6f698d34b951a870192e87a0223814be4491c320a2c8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..11b4aead841672480623f4a6aaff7fddbdd9fba2 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13107/images/a65c83187bf6f743d3ed6f698d34b951a870192e87a0223814be4491c320a2c8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e032d004e4c43267b0bb64b3df96f736c2a7e6aae9ba6c66ad34060aafc2e52b +size 10636 diff --git a/data/2025/2503_13xxx/2503.13107/images/a79b173bab09191aa22706ca508242c6b90a6e6be41e3fc5b3e73e81ffc48739.jpg b/data/2025/2503_13xxx/2503.13107/images/a79b173bab09191aa22706ca508242c6b90a6e6be41e3fc5b3e73e81ffc48739.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c43152dcc669ba321cd2747a15f7148c70b97aec --- /dev/null +++ b/data/2025/2503_13xxx/2503.13107/images/a79b173bab09191aa22706ca508242c6b90a6e6be41e3fc5b3e73e81ffc48739.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2273a2ebb499c69a3723dfc3c1779d5e2e212e3231ee77fe46acbefe91f5eb10 +size 40151 diff --git a/data/2025/2503_13xxx/2503.13107/images/a8a4f3d154c450fbb46e018e3baf35edf9633579582f096742ff7f80de9643a9.jpg b/data/2025/2503_13xxx/2503.13107/images/a8a4f3d154c450fbb46e018e3baf35edf9633579582f096742ff7f80de9643a9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..345a8788c64d0d5dbf5538a0759b09a39ef19e24 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13107/images/a8a4f3d154c450fbb46e018e3baf35edf9633579582f096742ff7f80de9643a9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:37a628fbe7331a916b4703e5645fef97e3561ff29b23629fcadacb747aab11fd +size 5111 diff --git a/data/2025/2503_13xxx/2503.13107/images/b57391157530332b83eb40b4f74b2031f218abfbe584bca644d6cd2c3c90407f.jpg b/data/2025/2503_13xxx/2503.13107/images/b57391157530332b83eb40b4f74b2031f218abfbe584bca644d6cd2c3c90407f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0f7a19325cd8532e8601a28b6ca529ef498c5563 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13107/images/b57391157530332b83eb40b4f74b2031f218abfbe584bca644d6cd2c3c90407f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f99d229511559df0f5a82d3a7e8f7824af5a2f79c5dc6e24d46aa7634485b337 +size 158979 diff --git a/data/2025/2503_13xxx/2503.13107/images/b5b433be94eda585d3a7cef671d94c6d0ca234272d9f996e8e718fe76967fb00.jpg 
b/data/2025/2503_13xxx/2503.13107/images/b5b433be94eda585d3a7cef671d94c6d0ca234272d9f996e8e718fe76967fb00.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ac350ee85b95a4958e2335ebfbddc062b109e71a --- /dev/null +++ b/data/2025/2503_13xxx/2503.13107/images/b5b433be94eda585d3a7cef671d94c6d0ca234272d9f996e8e718fe76967fb00.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bfe8d37743a4273d6f2d930aca1108512d8883796e0683e5c91e23181af25ba0 +size 3568 diff --git a/data/2025/2503_13xxx/2503.13107/images/ba9b2e85d431481e3a8e32ea539a3d303b45e8f405ca48188412fd89af77be26.jpg b/data/2025/2503_13xxx/2503.13107/images/ba9b2e85d431481e3a8e32ea539a3d303b45e8f405ca48188412fd89af77be26.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9cdf29b074ba0f2cf371be705ff9e70a717b39b9 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13107/images/ba9b2e85d431481e3a8e32ea539a3d303b45e8f405ca48188412fd89af77be26.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5d30ddc4070109fece46c4752546d4b29c0b57799632f0267a1e53d982087efc +size 27178 diff --git a/data/2025/2503_13xxx/2503.13107/images/bc707136f168b5b624e44e3170f545ffefd0d19ed8a9157308077d53570412be.jpg b/data/2025/2503_13xxx/2503.13107/images/bc707136f168b5b624e44e3170f545ffefd0d19ed8a9157308077d53570412be.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bf7e4fe1e83b78422d69b495465a2fca71f210aa --- /dev/null +++ b/data/2025/2503_13xxx/2503.13107/images/bc707136f168b5b624e44e3170f545ffefd0d19ed8a9157308077d53570412be.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8ddff8e48a927ddea546a24c9ce6eb1f709db829694e7754e569bde0cec1de3b +size 4250 diff --git a/data/2025/2503_13xxx/2503.13107/images/bda74f22bcd73caed9628f2ea6f61e04e1e365d3e7aacb83471252fff597a0b6.jpg b/data/2025/2503_13xxx/2503.13107/images/bda74f22bcd73caed9628f2ea6f61e04e1e365d3e7aacb83471252fff597a0b6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..25c6a9d3651db46aa3065f60c13239ae835634a2 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13107/images/bda74f22bcd73caed9628f2ea6f61e04e1e365d3e7aacb83471252fff597a0b6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:125fbb9b2dff3ca2c29808195a91ebc308e1871a0290a39be5b83916d4597082 +size 11514 diff --git a/data/2025/2503_13xxx/2503.13107/images/be5b2e7bdc0f0d54ee5b1d62eccfec0dfcdb504e72b22a3e20bb1da48360402e.jpg b/data/2025/2503_13xxx/2503.13107/images/be5b2e7bdc0f0d54ee5b1d62eccfec0dfcdb504e72b22a3e20bb1da48360402e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..eeb9344e89d7e6ddd252feab9c375544ef04bacb --- /dev/null +++ b/data/2025/2503_13xxx/2503.13107/images/be5b2e7bdc0f0d54ee5b1d62eccfec0dfcdb504e72b22a3e20bb1da48360402e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:262e46288aee59180621bcdb1b658f9f0cdac26d4ac7c3ea98fa1bd787271a44 +size 6380 diff --git a/data/2025/2503_13xxx/2503.13107/images/c8f13a8898f1f79f023450224eb4720425f24d7272f508eaac9db15c1b1adbef.jpg b/data/2025/2503_13xxx/2503.13107/images/c8f13a8898f1f79f023450224eb4720425f24d7272f508eaac9db15c1b1adbef.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2c4862dac9c937317527b6895cce02538f2422e9 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13107/images/c8f13a8898f1f79f023450224eb4720425f24d7272f508eaac9db15c1b1adbef.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:480ce738ebae209c7cc6c5e7ea0a3e9e6efab79c303ed10715ca61bad4eb39bb +size 5452 diff --git a/data/2025/2503_13xxx/2503.13107/images/cd4f57fe7e81f9e8b3921d58b7979766e51164cd331cbd4c3783c2b76d61d9c5.jpg b/data/2025/2503_13xxx/2503.13107/images/cd4f57fe7e81f9e8b3921d58b7979766e51164cd331cbd4c3783c2b76d61d9c5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0be42dfc8ef2b88b027dd5aee446f2538ec04c08 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13107/images/cd4f57fe7e81f9e8b3921d58b7979766e51164cd331cbd4c3783c2b76d61d9c5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d6f732912e768e5485ef4984be7d87351afcbb411138cacd83c13579980559ab +size 41027 diff --git a/data/2025/2503_13xxx/2503.13107/images/d5ad6983b6128eeec1e4e9721d200c6bac668bca9bea7e5ef70ec2c07258095b.jpg b/data/2025/2503_13xxx/2503.13107/images/d5ad6983b6128eeec1e4e9721d200c6bac668bca9bea7e5ef70ec2c07258095b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a58983d8106e7e15265ca2696e8d26ff19b64f90 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13107/images/d5ad6983b6128eeec1e4e9721d200c6bac668bca9bea7e5ef70ec2c07258095b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:21d1a014adc6c284c680c5cdc812485621472097add98eb7dfefc63c898fdd90 +size 158980 diff --git a/data/2025/2503_13xxx/2503.13107/images/d68d5414ee93abce830c76d8bad3c0dd6824d54150d5ca53159dc02a3792d344.jpg b/data/2025/2503_13xxx/2503.13107/images/d68d5414ee93abce830c76d8bad3c0dd6824d54150d5ca53159dc02a3792d344.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a43f094f8499b10606b03268d674c5e24313e174 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13107/images/d68d5414ee93abce830c76d8bad3c0dd6824d54150d5ca53159dc02a3792d344.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6fad9bb3db6847bfe38ee6fe85277f5467f0639dc73944da84bfd519f7e00cc9 +size 52598 diff --git a/data/2025/2503_13xxx/2503.13107/images/d96465a44578b92b95d86a245007c48c89608d96e2b641f2c2af97ae5787d3c4.jpg b/data/2025/2503_13xxx/2503.13107/images/d96465a44578b92b95d86a245007c48c89608d96e2b641f2c2af97ae5787d3c4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..29c8924e903bc2a2db18a5989926c7c48c1be259 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13107/images/d96465a44578b92b95d86a245007c48c89608d96e2b641f2c2af97ae5787d3c4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d101171b7feb8e072855b5cd778c259e7127466da52c5e23bb97e8f29099734e +size 20669 diff --git a/data/2025/2503_13xxx/2503.13107/images/df3d04f20204c760b313ec2c84457e3343648bd94b9f015e7e1ca4671fe51867.jpg b/data/2025/2503_13xxx/2503.13107/images/df3d04f20204c760b313ec2c84457e3343648bd94b9f015e7e1ca4671fe51867.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9583885649f0d484f1beffbbc2f249d7ea449c35 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13107/images/df3d04f20204c760b313ec2c84457e3343648bd94b9f015e7e1ca4671fe51867.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2387c231697e916bafb00c99b6e0556c9a58ed29f2f80899aadd6b63ea049e15 +size 23427 diff --git a/data/2025/2503_13xxx/2503.13107/images/e23426c796e93cc5f660b7e3e88608745d0fe309af86cc261bdd053f150b18ab.jpg b/data/2025/2503_13xxx/2503.13107/images/e23426c796e93cc5f660b7e3e88608745d0fe309af86cc261bdd053f150b18ab.jpg new file mode 100644 index 0000000000000000000000000000000000000000..03fc88620dcc6d5eb5c75006d366773e340e5846 --- /dev/null +++ 
b/data/2025/2503_13xxx/2503.13107/images/e23426c796e93cc5f660b7e3e88608745d0fe309af86cc261bdd053f150b18ab.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b92f16ad98496ede245734457d5ac225f5ee599b972385140f52ee1af79d0179 +size 5961 diff --git a/data/2025/2503_13xxx/2503.13107/images/ed04285b06690a6f4f7556e8adf2d935197d4d1b0fc3e108bbbb0426666d9aa5.jpg b/data/2025/2503_13xxx/2503.13107/images/ed04285b06690a6f4f7556e8adf2d935197d4d1b0fc3e108bbbb0426666d9aa5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0d110937854030b1d3dfdc649241823dfc91f3b7 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13107/images/ed04285b06690a6f4f7556e8adf2d935197d4d1b0fc3e108bbbb0426666d9aa5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8fe4d6adfcd5433921b8168f2568853201c8b946a2c32ea3337018b50854ce90 +size 39649 diff --git a/data/2025/2503_13xxx/2503.13107/images/f881ed1da6ab97af7e3a403c5126e7aeccbba2ec8e37e759b4095a3ea379329b.jpg b/data/2025/2503_13xxx/2503.13107/images/f881ed1da6ab97af7e3a403c5126e7aeccbba2ec8e37e759b4095a3ea379329b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b4cfc7001680e6891ffd4cb5d6f6a6ffa8377b69 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13107/images/f881ed1da6ab97af7e3a403c5126e7aeccbba2ec8e37e759b4095a3ea379329b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e20eeb8edadc3d50ebecc5847ed2847ec78638343d47cb9e077078866f8537fd +size 32878 diff --git a/data/2025/2503_13xxx/2503.13107/images/faf2ea79047e1f229460c28677a640fe966e0b996354f2479bed0259c4fa714d.jpg b/data/2025/2503_13xxx/2503.13107/images/faf2ea79047e1f229460c28677a640fe966e0b996354f2479bed0259c4fa714d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2a532def57bcc03a638da348e63f6efa81461d16 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13107/images/faf2ea79047e1f229460c28677a640fe966e0b996354f2479bed0259c4fa714d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:18dd6cdb8b8c1e9aefc7a5940400ea0c4e99772ca6f691c7ed11edba6e14f9c0 +size 6333 diff --git a/data/2025/2503_13xxx/2503.13107/images/ffcc6e9e895eff01f780f8bd05da7688628711fa34ba7e6cfa9ed07eea6550d6.jpg b/data/2025/2503_13xxx/2503.13107/images/ffcc6e9e895eff01f780f8bd05da7688628711fa34ba7e6cfa9ed07eea6550d6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b86641d3a4f1210545aba95556af2db577072b12 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13107/images/ffcc6e9e895eff01f780f8bd05da7688628711fa34ba7e6cfa9ed07eea6550d6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:41721d868e2684a69164f0fac7ba69bc277f0068df105eb11501f3d602f45bf2 +size 31385 diff --git a/data/2025/2503_13xxx/2503.13107/layout.json b/data/2025/2503_13xxx/2503.13107/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..ab2f4d13b937d55e6eab9b8a44dc45f7c2988107 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13107/layout.json @@ -0,0 +1,12497 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 63, + 102, + 547, + 139 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 102, + 547, + 139 + ], + "spans": [ + { + "bbox": [ + 63, + 102, + 547, + 139 + ], + "type": "text", + "content": "ClearSight: Visual Signal Enhancement for Object Hallucination Mitigation in Multimodal Large Language Models" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 207, + 161, + 406, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ 
+ 207, + 161, + 406, + 175 + ], + "spans": [ + { + "bbox": [ + 207, + 161, + 406, + 175 + ], + "type": "text", + "content": "Hao Yin Guangzong Si Zilei Wang*" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 190, + 176, + 419, + 190 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 190, + 176, + 419, + 190 + ], + "spans": [ + { + "bbox": [ + 190, + 176, + 419, + 190 + ], + "type": "text", + "content": "University of Science and Technology of China" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 141, + 191, + 465, + 202 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 191, + 465, + 202 + ], + "spans": [ + { + "bbox": [ + 141, + 191, + 465, + 202 + ], + "type": "text", + "content": "{yinhnavi, guangzongsi}@mail.ustc.edu.cn, zlwang@ustc.edu.cn" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 151, + 231, + 199, + 243 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 151, + 231, + 199, + 243 + ], + "spans": [ + { + "bbox": [ + 151, + 231, + 199, + 243 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 255, + 296, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 255, + 296, + 544 + ], + "spans": [ + { + "bbox": [ + 55, + 255, + 296, + 544 + ], + "type": "text", + "content": "Contrastive decoding strategies are widely used to mitigate object hallucinations in multimodal large language models (MLLMs). By reducing over-reliance on language priors, these strategies ensure that generated content remains closely grounded in visual inputs, producing contextually accurate outputs. Since contrastive decoding requires no additional training or external tools, it offers both computational efficiency and versatility, making it highly attractive. However, these methods present two main limitations: (1) bluntly suppressing language priors can compromise coherence and accuracy of generated content, and (2) processing contrastive inputs adds computational load, significantly slowing inference speed. To address these challenges, we propose Visual Amplification Fusion (VAF), a plug-and-play technique that enhances attention to visual signals within the model's middle layers, where modality fusion predominantly occurs. This approach enables more effective capture of visual features, reducing the model's bias toward language modality. Experimental results demonstrate that VAF significantly reduces hallucinations across various MLLMs without affecting inference speed, while maintaining coherence and accuracy in generated outputs. The code is available at https://github.com/ustc-hyin/ClearSight." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 567, + 135, + 578 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 567, + 135, + 578 + ], + "spans": [ + { + "bbox": [ + 56, + 567, + 135, + 578 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 586, + 295, + 694 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 586, + 295, + 694 + ], + "spans": [ + { + "bbox": [ + 55, + 586, + 295, + 694 + ], + "type": "text", + "content": "In recent years, MLLMs [8, 27, 33, 35, 54, 55] have achieved remarkable progress in the intersecting fields of computer vision and natural language processing, and have been widely applied in tasks such as image captioning and visual question answering. 
However, these models often encounter the issue of \"object hallucination\" [15, 29, 32, 37] in practical applications, where the generated textual descriptions do not match the actual objects in the image. This problem highlights an over-reliance on unimodal pri" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 232, + 553, + 280 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 232, + 553, + 280 + ], + "spans": [ + { + "bbox": [ + 313, + 232, + 553, + 280 + ], + "type": "text", + "content": "ors (especially language priors) [17, 48, 50, 53] during inference, posing potential risks in high-precision applications such as medical diagnosis [18, 46] and autonomous driving [9, 34, 39, 49]." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 281, + 555, + 413 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 281, + 555, + 413 + ], + "spans": [ + { + "bbox": [ + 313, + 281, + 555, + 413 + ], + "type": "text", + "content": "To address object hallucination [1, 2, 7, 14], several Contrastive Decoding strategies have been introduced in recent years. Among these, the Visual Contrastive Decoding (VCD) method has shown promise in reducing hallucinations by contrasting output distributions from both original and perturbed visual inputs, thus mitigating the model's excessive reliance on language priors [16, 41]. Notably, contrastive decoding methods do not require additional training or external tools, offering both computational efficiency and versatility, which has garnered them significant attention. However, these methods present two main limitations:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 331, + 423, + 479, + 435 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 423, + 479, + 435 + ], + "spans": [ + { + "bbox": [ + 331, + 423, + 479, + 435 + ], + "type": "text", + "content": "Limitations of Contrastive Decoding" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 330, + 445, + 539, + 517 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 330, + 445, + 538, + 480 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 330, + 445, + 538, + 480 + ], + "spans": [ + { + "bbox": [ + 330, + 445, + 538, + 480 + ], + "type": "text", + "content": "- While reducing over-reliance on language priors, these methods may compromise the coherence and accuracy of generated content." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 330, + 481, + 539, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 330, + 481, + 539, + 517 + ], + "spans": [ + { + "bbox": [ + 330, + 481, + 539, + 517 + ], + "type": "text", + "content": "- Contrastive decoding necessitates separate processing of the original and contrastive inputs, which considerably increases inference time." + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 313, + 533, + 555, + 652 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 533, + 555, + 652 + ], + "spans": [ + { + "bbox": [ + 313, + 533, + 555, + 652 + ], + "type": "text", + "content": "To address these shortcomings, we aim to propose a training-free method that effectively reduces hallucinations without compromising content quality or inference speed. 
Our saliency analysis of the model's attention maps reveals that biases toward language in generated content do not arise from an overemphasis on language signals but rather from insufficient attention on visual information during modality fusion. Based on this insight, we introduce a novel, plug-and-play technique to mitigate hallucinations: Visual Amplification Fusion (VAF)." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 653, + 556, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 653, + 556, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 653, + 556, + 715 + ], + "type": "text", + "content": "Our analysis indicates that modality fusion in MLLMs primarily occurs within the middle layers. VAF specifically amplifies visual signals at these middle layers, enabling the model to capture more distinctive visual features during fusion, which in turn reduces false descriptions in generated" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 202, + 35, + 559 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 202, + 35, + 559 + ], + "spans": [ + { + "bbox": [ + 14, + 202, + 35, + 559 + ], + "type": "text", + "content": "arXiv:2503.13107v2 [cs.CV] 27 May 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 703, + 145, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 703, + 145, + 712 + ], + "spans": [ + { + "bbox": [ + 67, + 703, + 145, + 712 + ], + "type": "text", + "content": "*Corresponding Author" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 295, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 295, + 133 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 295, + 133 + ], + "type": "text", + "content": "text. This technique not only strengthens the model's visual representations but also retains the beneficial influence of language priors, thus preserving content quality. Furthermore, by eliminating the need to process contrastive samples, VAF maintains inference speed." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 133, + 295, + 240 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 133, + 295, + 240 + ], + "spans": [ + { + "bbox": [ + 55, + 133, + 295, + 240 + ], + "type": "text", + "content": "Experimental results validate the effectiveness of the VAF method. Across multiple object hallucination benchmarks, VAF demonstrated notable performance gains, with improvements of approximately " + }, + { + "bbox": [ + 55, + 133, + 295, + 240 + ], + "type": "inline_equation", + "content": "3\\%" + }, + { + "bbox": [ + 55, + 133, + 295, + 240 + ], + "type": "text", + "content": " on POPE and " + }, + { + "bbox": [ + 55, + 133, + 295, + 240 + ], + "type": "inline_equation", + "content": "7\\%" + }, + { + "bbox": [ + 55, + 133, + 295, + 240 + ], + "type": "text", + "content": " on MME. In terms of coherence and accuracy of generated responses, VCD caused a roughly " + }, + { + "bbox": [ + 55, + 133, + 295, + 240 + ], + "type": "inline_equation", + "content": "19\\%" + }, + { + "bbox": [ + 55, + 133, + 295, + 240 + ], + "type": "text", + "content": " decrease on NoCaps, while VAF maintained content quality without negative impacts. 
Additionally, VCD reduced inference speed by " + }, + { + "bbox": [ + 55, + 133, + 295, + 240 + ], + "type": "inline_equation", + "content": "50\\%" + }, + { + "bbox": [ + 55, + 133, + 295, + 240 + ], + "type": "text", + "content": ", whereas VAF had virtually no effect on inference speed." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 66, + 241, + 272, + 252 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 241, + 272, + 252 + ], + "spans": [ + { + "bbox": [ + 66, + 241, + 272, + 252 + ], + "type": "text", + "content": "In summary, the main contributions are as follows:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 253, + 295, + 396 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 55, + 253, + 295, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 253, + 295, + 289 + ], + "spans": [ + { + "bbox": [ + 55, + 253, + 295, + 289 + ], + "type": "text", + "content": "- We identify the negative impacts of contrastive decoding methods on both the quality of generated content and model inference speed." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 289, + 295, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 289, + 295, + 323 + ], + "spans": [ + { + "bbox": [ + 56, + 289, + 295, + 323 + ], + "type": "text", + "content": "- We analyze the modality fusion mechanism in MLLMs, highlighting its insufficient attention to visual information." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 324, + 295, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 324, + 295, + 361 + ], + "spans": [ + { + "bbox": [ + 56, + 324, + 295, + 361 + ], + "type": "text", + "content": "- We introduce the VAF method, which effectively mitigates the object hallucination problem while maintaining inference speed, coherence, and accuracy." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 361, + 295, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 361, + 295, + 396 + ], + "spans": [ + { + "bbox": [ + 56, + 361, + 295, + 396 + ], + "type": "text", + "content": "- We demonstrate the significant performance improvements of the VAF method across multiple object hallucination benchmarks." + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 55, + 409, + 139, + 422 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 409, + 139, + 422 + ], + "spans": [ + { + "bbox": [ + 55, + 409, + 139, + 422 + ], + "type": "text", + "content": "2. Related work" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 55, + 429, + 250, + 442 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 429, + 250, + 442 + ], + "spans": [ + { + "bbox": [ + 55, + 429, + 250, + 442 + ], + "type": "text", + "content": "2.1. Multimodal Large Language Models" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 55, + 447, + 295, + 627 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 447, + 295, + 627 + ], + "spans": [ + { + "bbox": [ + 55, + 447, + 295, + 627 + ], + "type": "text", + "content": "The development of MLLMs [26, 36, 51, 52] has advanced from BERT-based decoders to LLM-based architectures [4, 11, 40, 43-45], enabling improved multimodal relationship capture [6, 10, 24, 25]. 
Models like BLIP-2 [27] and miniGPT-4 [55] incorporate a Q-Former mechanism, which enhances the alignment between visual and textual inputs, allowing for more precise interactions across modalities. InstructBLIP [12] builds on this approach by adding task-specific instructions, which improve the model's understanding of context-sensitive visual semantics. LLaVA [33] and Qwen-VL [5] utilize simpler linear projection techniques that streamline the alignment process, resulting in improved overall performance on vision-language tasks. However, hallucination issues persist across MLLMs, posing a significant challenge that requires further research." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 55, + 635, + 228, + 649 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 635, + 228, + 649 + ], + "spans": [ + { + "bbox": [ + 55, + 635, + 228, + 649 + ], + "type": "text", + "content": "2.2. Contrastive Decoding Strategies" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 55, + 653, + 295, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 653, + 295, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 653, + 295, + 714 + ], + "type": "text", + "content": "In recent years, Contrastive Decoding [19, 21, 22, 28] has emerged as a technique to improve generative model accuracy through contrastive judgment, widely employed to address hallucinations in generated content. For instance, Visual Contrastive Decoding (VCD) [23] contrasts output" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 72, + 555, + 252 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 555, + 252 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 555, + 252 + ], + "type": "text", + "content": "distributions derived from original and distorted visual inputs, effectively reducing the over-reliance on statistical bias and unimodal priors, two essential causes of object hallucinations. Similarly, Instruction Contrastive Decoding (ICD) [47] works by comparing distributions derived from standard and disrupted instructions, thereby removing hallucinated concepts from the original distribution. These contrastive methods help ground generated content closely to visual inputs, resulting in contextually accurate outputs. However, despite these advancements, contrastive decoding faces two primary limitations: slower inference speed and reduced coherence in generated content. To overcome these limitations, we propose the VAF method, which achieves effective hallucination reduction while preserving both inference speed and content coherence." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 261, + 473, + 274 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 261, + 473, + 274 + ], + "spans": [ + { + "bbox": [ + 313, + 261, + 473, + 274 + ], + "type": "text", + "content": "3. Preliminary and Motivation" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 281, + 555, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 281, + 555, + 354 + ], + "spans": [ + { + "bbox": [ + 313, + 281, + 555, + 354 + ], + "type": "text", + "content": "In Sec. 3.1, we illustrate the working mechanism of contrastive decoding to mitigate hallucinations, using Visual Contrastive Decoding as an example. In Sec. 3.2, we analyze two main drawbacks of this approach: its potential to disrupt the coherence and accuracy of generated content, and its tendency to slow down model inference."
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 359, + 440, + 372 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 359, + 440, + 372 + ], + "spans": [ + { + "bbox": [ + 313, + 359, + 440, + 372 + ], + "type": "text", + "content": "3.1. Contrastive Decoding" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 376, + 554, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 376, + 554, + 460 + ], + "spans": [ + { + "bbox": [ + 313, + 376, + 554, + 460 + ], + "type": "text", + "content": "We consider a MLLM parametrized by " + }, + { + "bbox": [ + 313, + 376, + 554, + 460 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 313, + 376, + 554, + 460 + ], + "type": "text", + "content": ". The model takes as input a textual query " + }, + { + "bbox": [ + 313, + 376, + 554, + 460 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 313, + 376, + 554, + 460 + ], + "type": "text", + "content": " and a visual input " + }, + { + "bbox": [ + 313, + 376, + 554, + 460 + ], + "type": "inline_equation", + "content": "v" + }, + { + "bbox": [ + 313, + 376, + 554, + 460 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 313, + 376, + 554, + 460 + ], + "type": "inline_equation", + "content": "v" + }, + { + "bbox": [ + 313, + 376, + 554, + 460 + ], + "type": "text", + "content": " provides contextual visual information to assist the model in generating a relevant response " + }, + { + "bbox": [ + 313, + 376, + 554, + 460 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 313, + 376, + 554, + 460 + ], + "type": "text", + "content": " to the textual query. The response " + }, + { + "bbox": [ + 313, + 376, + 554, + 460 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 313, + 376, + 554, + 460 + ], + "type": "text", + "content": " is sampled auto-regressively from the probability distribution conditioned on the query " + }, + { + "bbox": [ + 313, + 376, + 554, + 460 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 313, + 376, + 554, + 460 + ], + "type": "text", + "content": " and the visual context " + }, + { + "bbox": [ + 313, + 376, + 554, + 460 + ], + "type": "inline_equation", + "content": "v" + }, + { + "bbox": [ + 313, + 376, + 554, + 460 + ], + "type": "text", + "content": ". 
Mathematically, this can be formulated as:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 370, + 467, + 553, + 495 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 370, + 467, + 553, + 495 + ], + "spans": [ + { + "bbox": [ + 370, + 467, + 553, + 495 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} y_{t} \\sim p_{\\theta} \\left(y_{t} \\mid v, x, y_{<t}\\right) \\tag{1} \\\\ \\propto \\exp \\operatorname{logit}_{\\theta} \\left(y_{t} \\mid v, x, y_{<t}\\right) \\end{array}", + "image_path": "faf2ea79047e1f229460c28677a640fe966e0b996354f2479bed0259c4fa714d.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 313, + 501, + 554, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 501, + 554, + 525 + ], + "spans": [ + { + "bbox": [ + 313, + 501, + 554, + 525 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 313, + 501, + 554, + 525 + ], + "type": "inline_equation", + "content": "y_{t}" + }, + { + "bbox": [ + 313, + 501, + 554, + 525 + ], + "type": "text", + "content": " denotes the token at time step " + }, + { + "bbox": [ + 313, + 501, + 554, + 525 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 313, + 501, + 554, + 525 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 313, + 501, + 554, + 525 + ], + "type": "inline_equation", + "content": "y_{< t}" + }, + { + "bbox": [ + 313, + 501, + 554, + 525 + ], + "type": "text", + "content": " represents the sequence of generated tokens up to the time step " + }, + { + "bbox": [ + 313, + 501, + 554, + 525 + ], + "type": "inline_equation", + "content": "(t - 1)" + }, + { + "bbox": [ + 313, + 501, + 554, + 525 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 313, + 525, + 555, + 669 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 525, + 555, + 669 + ], + "spans": [ + { + "bbox": [ + 313, + 525, + 555, + 669 + ], + "type": "text", + "content": "To mitigate the issue of object hallucination in MLLMs, contrastive decoding techniques can be applied. Here, we present Visual Contrastive Decoding (VCD) as a representative approach, shown in Fig. 1. Specifically, given a textual query " + }, + { + "bbox": [ + 313, + 525, + 555, + 669 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 313, + 525, + 555, + 669 + ], + "type": "text", + "content": " and a visual input " + }, + { + "bbox": [ + 313, + 525, + 555, + 669 + ], + "type": "inline_equation", + "content": "v" + }, + { + "bbox": [ + 313, + 525, + 555, + 669 + ], + "type": "text", + "content": ", the model generates two distinct output distributions: one conditioned on the original " + }, + { + "bbox": [ + 313, + 525, + 555, + 669 + ], + "type": "inline_equation", + "content": "v" + }, + { + "bbox": [ + 313, + 525, + 555, + 669 + ], + "type": "text", + "content": " and the other on the distorted visual input " + }, + { + "bbox": [ + 313, + 525, + 555, + 669 + ], + "type": "inline_equation", + "content": "v'" + }, + { + "bbox": [ + 313, + 525, + 555, + 669 + ], + "type": "text", + "content": ", which is derived by applying pre-defined distortions (i.e., Gaussian noise mask) to " + }, + { + "bbox": [ + 313, + 525, + 555, + 669 + ], + "type": "inline_equation", + "content": "v" + }, + { + "bbox": [ + 313, + 525, + 555, + 669 + ], + "type": "text", + "content": ". 
Then, a new contrastive probability distribution is computed by exploiting the differences between the two initially obtained distributions. The new contrastive distribution " + }, + { + "bbox": [ + 313, + 525, + 555, + 669 + ], + "type": "inline_equation", + "content": "p_{vcd}" + }, + { + "bbox": [ + 313, + 525, + 555, + 669 + ], + "type": "text", + "content": " is formulated as:" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 328, + 673, + 553, + 716 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 328, + 673, + 553, + 716 + ], + "spans": [ + { + "bbox": [ + 328, + 673, + 553, + 716 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} p_{vcd}(y \\mid v, v^{\\prime}, x) = \\operatorname{softmax} \\left[ \\operatorname{logit}_{\\theta}(y \\mid v, x) + \\right. \\tag{2} \\\\ \\left. \\alpha \\cdot \\left(\\operatorname{logit}_{\\theta}(y \\mid v, x) - \\operatorname{logit}_{\\theta}(y \\mid v^{\\prime}, x)\\right) \\right], \\end{array}", + "image_path": "bda74f22bcd73caed9628f2ea6f61e04e1e365d3e7aacb83471252fff597a0b6.jpg" + } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 296, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 296, + 133 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 296, + 133 + ], + "type": "text", + "content": "where larger " + }, + { + "bbox": [ + 55, + 72, + 296, + 133 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 55, + 72, + 296, + 133 + ], + "type": "text", + "content": " values indicate a stronger amplification of differences between the two distributions (" + }, + { + "bbox": [ + 55, + 72, + 296, + 133 + ], + "type": "inline_equation", + "content": "\\alpha = 0" + }, + { + "bbox": [ + 55, + 72, + 296, + 133 + ], + "type": "text", + "content": " reduces to regular decoding). Essentially, VCD serves as a corrective mechanism, reducing hallucinations by contrasting against a distribution predisposed to favoring them." + } + ] + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 59, + 142, + 293, + 257 + ], + "blocks": [ + { + "bbox": [ + 59, + 142, + 293, + 257 + ], + "lines": [ + { + "bbox": [ + 59, + 142, + 293, + 257 + ], + "spans": [ + { + "bbox": [ + 59, + 142, + 293, + 257 + ], + "type": "image", + "image_path": "4758f95feb2921349b4687274e578b427177cad7402749623729eb7e539198b3.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 264, + 296, + 331 + ], + "lines": [ + { + "bbox": [ + 55, + 264, + 296, + 331 + ], + "spans": [ + { + "bbox": [ + 55, + 264, + 296, + 331 + ], + "type": "text", + "content": "Figure 1. Illustration of Visual Contrastive Decoding. The hallucinated object \"Teacher\" is suppressed by contrasting with an output distribution prone to hallucinations. This method has two main drawbacks: (1) additional processing of distorted visual inputs greatly increases inference time; (2) subtracting the language prior disrupts content coherence." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 351, + 249, + 364 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 351, + 249, + 364 + ], + "spans": [ + { + "bbox": [ + 55, + 351, + 249, + 364 + ], + "type": "text", + "content": "3.2. 
Limitations of Contrastive Decoding" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 369, + 296, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 369, + 296, + 441 + ], + "spans": [ + { + "bbox": [ + 55, + 369, + 296, + 441 + ], + "type": "text", + "content": "As contrastive decoding methods do not require training or external tools, they offer high computational efficiency and generalizability, attracting significant attention in academia. However, these methods still have two major drawbacks: a reduction in the quality of generated content and slower inference speed." + } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 74, + 453, + 272, + 587 + ], + "blocks": [ + { + "bbox": [ + 74, + 453, + 272, + 587 + ], + "lines": [ + { + "bbox": [ + 74, + 453, + 272, + 587 + ], + "spans": [ + { + "bbox": [ + 74, + 453, + 272, + 587 + ], + "type": "image", + "image_path": "ba9b2e85d431481e3a8e32ea539a3d303b45e8f405ca48188412fd89af77be26.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 596, + 295, + 640 + ], + "lines": [ + { + "bbox": [ + 55, + 596, + 295, + 640 + ], + "spans": [ + { + "bbox": [ + 55, + 596, + 295, + 640 + ], + "type": "text", + "content": "Figure 2. Impact of VCD on Model Performance. CIDEr scores are reported on the Nocaps benchmark, while Accuracy is presented for the ScienceQA benchmark. The use of VCD leads to a significant decline in model performance." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 653, + 296, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 653, + 296, + 715 + ], + "spans": [ + { + "bbox": [ + 55, + 653, + 296, + 715 + ], + "type": "text", + "content": "While contrasting logits of " + }, + { + "bbox": [ + 55, + 653, + 296, + 715 + ], + "type": "inline_equation", + "content": "p_{\\theta}(y \\mid v, x)" + }, + { + "bbox": [ + 55, + 653, + 296, + 715 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 55, + 653, + 296, + 715 + ], + "type": "inline_equation", + "content": "p_{\\theta}(y \\mid v', x)" + }, + { + "bbox": [ + 55, + 653, + 296, + 715 + ], + "type": "text", + "content": " can help reduce over-reliance on language priors and mitigate hallucination in MLLMs-as evidenced by a " + }, + { + "bbox": [ + 55, + 653, + 296, + 715 + ], + "type": "inline_equation", + "content": "4\\%" + }, + { + "bbox": [ + 55, + 653, + 296, + 715 + ], + "type": "text", + "content": " performance gain on the POPE benchmark using the VCD method-merely decreasing the influence of the language" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 72, + 553, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 553, + 167 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 553, + 167 + ], + "type": "text", + "content": "modality on the output distribution may undermine the coherence of the generated content, potentially leading to prediction errors. This issue is less pronounced in straightforward object hallucination tasks, where responses are limited to binary options, such as \"yes\" or \"no\". However, in more complex tasks, including multiple-choice question answering and image captioning, the impact of contrastive decoding methods on content quality becomes more significant."
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 167, + 555, + 276 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 167, + 555, + 276 + ], + "spans": [ + { + "bbox": [ + 313, + 167, + 555, + 276 + ], + "type": "text", + "content": "To verify this, we applied VCD method to LLaVA-v1.5-7B and LLaVA-v1.5-13B models, assessing their performance on the ScienceQA [38] and NoCaps benchmarks. As illustrated in Fig. 2, our findings reveal that, following the application of VCD, model performance decreased by " + }, + { + "bbox": [ + 313, + 167, + 555, + 276 + ], + "type": "inline_equation", + "content": "5\\%" + }, + { + "bbox": [ + 313, + 167, + 555, + 276 + ], + "type": "text", + "content": " on ScienceQA and by a considerable " + }, + { + "bbox": [ + 313, + 167, + 555, + 276 + ], + "type": "inline_equation", + "content": "45\\%" + }, + { + "bbox": [ + 313, + 167, + 555, + 276 + ], + "type": "text", + "content": " on NoCaps. These results suggest that in tasks requiring nuanced natural language generation, contrastive decoding methods can substantially impair content quality." + } + ] + } + ], + "index": 9 + }, + { + "type": "table", + "bbox": [ + 324, + 284, + 545, + 369 + ], + "blocks": [ + { + "bbox": [ + 324, + 284, + 545, + 369 + ], + "lines": [ + { + "bbox": [ + 324, + 284, + 545, + 369 + ], + "spans": [ + { + "bbox": [ + 324, + 284, + 545, + 369 + ], + "type": "table", + "html": "
<table><thead><tr><th>Model</th><th>Method</th><th>ScienceQA</th><th>Nocaps</th></tr></thead><tbody>
<tr><td rowspan="2">LLaVA-v1.5-7B</td><td>Regular</td><td>0.141s</td><td>0.456s</td></tr>
<tr><td>VCD</td><td>0.293s</td><td>1.086s</td></tr>
<tr><td rowspan="2">LLaVA-v1.5-13B</td><td>Regular</td><td>0.222s</td><td>0.602s</td></tr>
<tr><td>VCD</td><td>0.459s</td><td>1.372s</td></tr>
</tbody></table>
", + "image_path": "f881ed1da6ab97af7e3a403c5126e7aeccbba2ec8e37e759b4095a3ea379329b.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 376, + 555, + 421 + ], + "lines": [ + { + "bbox": [ + 313, + 376, + 555, + 421 + ], + "spans": [ + { + "bbox": [ + 313, + 376, + 555, + 421 + ], + "type": "text", + "content": "Table 1. Impact of VCD on Model Inference Speed. The table shows the average inference time per sample (in seconds) on the ScienceQA and Nocaps benchmarks. Applying the VCD method nearly doubled the inference time of the model." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 313, + 433, + 554, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 433, + 554, + 552 + ], + "spans": [ + { + "bbox": [ + 313, + 433, + 554, + 552 + ], + "type": "text", + "content": "Contrastive decoding methods notably reduce inference speed because they require calculating the output distribution for additional contrastive samples. For instance, in VCD method, each visual input " + }, + { + "bbox": [ + 313, + 433, + 554, + 552 + ], + "type": "inline_equation", + "content": "v" + }, + { + "bbox": [ + 313, + 433, + 554, + 552 + ], + "type": "text", + "content": " necessitates computing the logits of both " + }, + { + "bbox": [ + 313, + 433, + 554, + 552 + ], + "type": "inline_equation", + "content": "p_{\\theta}(y \\mid v, x)" + }, + { + "bbox": [ + 313, + 433, + 554, + 552 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 433, + 554, + 552 + ], + "type": "inline_equation", + "content": "p_{\\theta}(y \\mid v', x)" + }, + { + "bbox": [ + 313, + 433, + 554, + 552 + ], + "type": "text", + "content": " separately. This doubles the computation load during inference compared to vanilla decoding. We evaluated the inference speed of VCD versus vanilla decoding on ScienceQA. The experimental results, shown in Tab. 1, reveal that VCD's inference time is almost double that of vanilla decoding." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 562, + 491, + 575 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 562, + 491, + 575 + ], + "spans": [ + { + "bbox": [ + 313, + 562, + 491, + 575 + ], + "type": "text", + "content": "4. Visual Neglect in Modal Fusion" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 582, + 555, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 582, + 555, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 582, + 555, + 715 + ], + "type": "text", + "content": "The primary objective of this section is to examine why MLLMs tend to rely excessively on language priors in their predictions. In Sec. 4.1, saliency analysis reveals that image tokens influence prediction outcomes mainly through interactions with instruction tokens within the middle layers. Sec. 4.2 then compares attention weights across different modalities, showing that the attention given to visual features is notably lower than that allocated to system prompts and user instructions. These findings indicate that visual information is often underutilized in the modality fusion process, resulting in an over-reliance on language priors." 
+ } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 247, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 247, + 85 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 247, + 85 + ], + "type": "text", + "content": "4.1. Mid-layer: Visual-Language Fusion" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 90, + 295, + 161 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 90, + 295, + 161 + ], + "spans": [ + { + "bbox": [ + 55, + 90, + 295, + 161 + ], + "type": "text", + "content": "To uncover why MLLMs tend to overly rely on language priors and overlook visual content in prediction, it is necessary first to clarify how the model utilizes visual modality information. This section explores the influence of the visual modality on prediction outcomes from the perspective of visual information interaction." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 163, + 296, + 222 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 163, + 296, + 222 + ], + "spans": [ + { + "bbox": [ + 55, + 163, + 296, + 222 + ], + "type": "text", + "content": "We employ the saliency technique, a widely used interpretability tool, to highlight key token interactions within the attention mechanism. Following established practices, we utilize Taylor expansion to compute saliency scores for each element of the attention matrix:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 120, + 231, + 295, + 264 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 231, + 295, + 264 + ], + "spans": [ + { + "bbox": [ + 120, + 231, + 295, + 264 + ], + "type": "interline_equation", + "content": "I _ {l} = \\left| \\sum_ {h} A _ {h, l} \\odot \\frac {\\partial \\mathcal {L} (x)}{\\partial A _ {h , l}} \\right|. \\tag {3}", + "image_path": "e23426c796e93cc5f660b7e3e88608745d0fe309af86cc261bdd053f150b18ab.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 274, + 296, + 369 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 274, + 296, + 369 + ], + "spans": [ + { + "bbox": [ + 55, + 274, + 296, + 369 + ], + "type": "text", + "content": "Here, " + }, + { + "bbox": [ + 55, + 274, + 296, + 369 + ], + "type": "inline_equation", + "content": "A_{h,l}" + }, + { + "bbox": [ + 55, + 274, + 296, + 369 + ], + "type": "text", + "content": " represents the attention matrix value for the " + }, + { + "bbox": [ + 55, + 274, + 296, + 369 + ], + "type": "inline_equation", + "content": "h" + }, + { + "bbox": [ + 55, + 274, + 296, + 369 + ], + "type": "text", + "content": "-th attention head in the " + }, + { + "bbox": [ + 55, + 274, + 296, + 369 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 55, + 274, + 296, + 369 + ], + "type": "text", + "content": "-th layer, " + }, + { + "bbox": [ + 55, + 274, + 296, + 369 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 55, + 274, + 296, + 369 + ], + "type": "text", + "content": " denotes the input, and " + }, + { + "bbox": [ + 55, + 274, + 296, + 369 + ], + "type": "inline_equation", + "content": "\\mathcal{L}(x)" + }, + { + "bbox": [ + 55, + 274, + 296, + 369 + ], + "type": "text", + "content": " is the loss function of the task, e.g., the cross-entropy objective for question-answering tasks. 
The saliency matrix " + }, + { + "bbox": [ + 55, + 274, + 296, + 369 + ], + "type": "inline_equation", + "content": "I_{l}" + }, + { + "bbox": [ + 55, + 274, + 296, + 369 + ], + "type": "text", + "content": " for the " + }, + { + "bbox": [ + 55, + 274, + 296, + 369 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 55, + 274, + 296, + 369 + ], + "type": "text", + "content": "-th layer is obtained by averaging across all attention heads. The significance of information flow from the " + }, + { + "bbox": [ + 55, + 274, + 296, + 369 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 55, + 274, + 296, + 369 + ], + "type": "text", + "content": "-th token to the " + }, + { + "bbox": [ + 55, + 274, + 296, + 369 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 55, + 274, + 296, + 369 + ], + "type": "text", + "content": "-th token in MLLMs is represented by " + }, + { + "bbox": [ + 55, + 274, + 296, + 369 + ], + "type": "inline_equation", + "content": "I_{l}(i,j)" + }, + { + "bbox": [ + 55, + 274, + 296, + 369 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 370, + 296, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 370, + 296, + 430 + ], + "spans": [ + { + "bbox": [ + 55, + 370, + 296, + 430 + ], + "type": "text", + "content": "To draw a clearer picture of visual information flow in MLLMs, we introduce two quantitative metrics based on " + }, + { + "bbox": [ + 55, + 370, + 296, + 430 + ], + "type": "inline_equation", + "content": "I_{l}(i,j)" + }, + { + "bbox": [ + 55, + 370, + 296, + 430 + ], + "type": "text", + "content": ", with a particular focus on the information interaction involving image tokens. The definitions of the two quantitative metrics follow below." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 430, + 296, + 454 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 430, + 296, + 454 + ], + "spans": [ + { + "bbox": [ + 55, + 430, + 296, + 454 + ], + "type": "inline_equation", + "content": "S_{vv}" + }, + { + "bbox": [ + 55, + 430, + 296, + 454 + ], + "type": "text", + "content": ", measuring the importance of information flow among image tokens:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 110, + 464, + 294, + 492 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 464, + 294, + 492 + ], + "spans": [ + { + "bbox": [ + 110, + 464, + 294, + 492 + ], + "type": "interline_equation", + "content": "S _ {v v} = \\frac {\\sum_ {(i , j) \\in C _ {v v}} I _ {l} (i , j)}{\\left| C _ {v v} \\right|} \\tag {4}", + "image_path": "7538f0c12c3655ed2dbe444565937bdcf67a420ed7cad16c765b0c24d4cd1f93.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 110, + 495, + 241, + 508 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 495, + 241, + 508 + ], + "spans": [ + { + "bbox": [ + 110, + 495, + 241, + 508 + ], + "type": "interline_equation", + "content": "C _ {v v} = \\{(i, j): i, j \\in \\mathcal {V}, i \\geq j \\}.", + "image_path": "9892ea3780bed5aaca7927a6248d8fead913c8fd06d75a5b023f781636f42ee7.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 55, + 517, + 296, + 542 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 517, + 296, + 542 + ], + "spans": [ + { + "bbox": [ + 55, + 517, + 296, + 542 + ], + "type": "inline_equation", + "content": "S_{vt}" + }, + { + "bbox": [ + 55, + 517, + 296, + 542 + ], + "type": "text", + "content": ", measuring the importance of information flow from image tokens to instruction tokens:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 113, + 552, + 295, + 580 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 552, + 295, + 580 + ], + "spans": [ + { + "bbox": [ + 113, + 552, + 295, + 580 + ], + "type": "interline_equation", + "content": "S _ {v t} = \\frac {\\sum_ {(i , j) \\in C _ {v t}} I _ {l} (i , j)}{\\left| C _ {v t} \\right|} \\tag {5}", + "image_path": "a8a4f3d154c450fbb46e018e3baf35edf9633579582f096742ff7f80de9643a9.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 113, + 582, + 238, + 595 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 582, + 238, + 595 + ], + "spans": [ + { + "bbox": [ + 113, + 582, + 238, + 595 + ], + "type": "interline_equation", + "content": "C _ {v t} = \\{(i, j): i \\in \\mathcal {T}, j \\in \\mathcal {V} \\}.", + "image_path": "21e87c39a1185adaa1fa5daef642e19c09c6b7c37526b1af15163bfbeb932039.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 55, + 605, + 296, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 605, + 296, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 605, + 296, + 714 + ], + "type": "text", + "content": "Here, " + }, + { + "bbox": [ + 55, + 605, + 296, + 714 + ], + "type": "inline_equation", + "content": "\\mathcal{V}" + }, + { + "bbox": [ + 55, + 605, + 296, + 714 + ], + "type": "text", + "content": " represents the index set of image tokens, derived from features learned by pre-trained visual encoders, while " + }, + { + "bbox": [ + 55, + 605, + 296, + 714 + ], + "type": "inline_equation", + "content": "\\mathcal{T}" + }, + { + "bbox": [ + 55, + 605, + 296, + 714 + ], + "type": "text", + 
"content": " denotes the index set of instruction tokens, specifying requests or questions related to the images. " + }, + { + "bbox": [ + 55, + 605, + 296, + 714 + ], + "type": "inline_equation", + "content": "S_{vv}" + }, + { + "bbox": [ + 55, + 605, + 296, + 714 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 55, + 605, + 296, + 714 + ], + "type": "inline_equation", + "content": "S_{vt}" + }, + { + "bbox": [ + 55, + 605, + 296, + 714 + ], + "type": "text", + "content": " are utilized to analyze the mechanisms of visual information processing in MLLMs. We define attention interactions among image tokens as intra-visual information flow and those between image and instruction tokens as visual-textual information flow." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 72, + 555, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 555, + 180 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 555, + 180 + ], + "type": "text", + "content": "We conducted experiments with the LLaVA-v1.5-7B model on the MS COCO dataset under the POPE benchmark, sampling 500 examples for evaluation. Fig. 3 underscores the critical role of the visual-textual information flow within the model's middle layers, specifically from the 8-th to the 15-th layer. This observation indicates that in these layers, visual information interacts intensively with textual information via attention mechanisms, which substantially influences the prediction outcomes." + } + ] + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 329, + 194, + 538, + 326 + ], + "blocks": [ + { + "bbox": [ + 329, + 194, + 538, + 326 + ], + "lines": [ + { + "bbox": [ + 329, + 194, + 538, + 326 + ], + "spans": [ + { + "bbox": [ + 329, + 194, + 538, + 326 + ], + "type": "image", + "image_path": "591b4f3e08189d0211b7d6030a04e8d5102e8a900d3a71f513acb19ff213f9c2.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 331, + 555, + 365 + ], + "lines": [ + { + "bbox": [ + 313, + 331, + 555, + 365 + ], + "spans": [ + { + "bbox": [ + 313, + 331, + 555, + 365 + ], + "type": "text", + "content": "Figure 3. The importance of intra-visual flow and visual-textual flow across various layers. The visual-textual information flow in the middle layers has a significant impact on prediction outcomes." + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 377, + 520, + 389 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 377, + 520, + 389 + ], + "spans": [ + { + "bbox": [ + 313, + 377, + 520, + 389 + ], + "type": "text", + "content": "4.2. Attention Imbalance Across Modalities" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 395, + 554, + 455 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 395, + 554, + 455 + ], + "spans": [ + { + "bbox": [ + 313, + 395, + 554, + 455 + ], + "type": "text", + "content": "Sec. 4.1 reveals that the middle layers facilitate crucial fusion, integrating visual and textual inputs into cross-modal semantic representations that drive final predictions. Accordingly, this section will delve deeper into the attention to visual inputs throughout the modality fusion process." 
+ } + ] + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 321, + 468, + 539, + 613 + ], + "blocks": [ + { + "bbox": [ + 321, + 468, + 539, + 613 + ], + "lines": [ + { + "bbox": [ + 321, + 468, + 539, + 613 + ], + "spans": [ + { + "bbox": [ + 321, + 468, + 539, + 613 + ], + "type": "image", + "image_path": "5a8021e71dc27b5039408120018516907f081adc2b0b7a6b99e3f5b145ca2cf7.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 619, + 555, + 663 + ], + "lines": [ + { + "bbox": [ + 313, + 619, + 555, + 663 + ], + "spans": [ + { + "bbox": [ + 313, + 619, + 555, + 663 + ], + "type": "text", + "content": "Figure 4. Attention Distribution of Modal Information Across Model Layers. In the middle layers, the model allocates insufficient attention to visual features while disproportionately focusing on system prompts." + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + }, + { + "bbox": [ + 313, + 677, + 554, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 677, + 554, + 714 + ], + "spans": [ + { + "bbox": [ + 313, + 677, + 554, + 714 + ], + "type": "text", + "content": "We define the attention allocation, " + }, + { + "bbox": [ + 313, + 677, + 554, + 714 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 313, + 677, + 554, + 714 + ], + "type": "text", + "content": ", as the aggregate attention score assigned to a specific type of token within a single layer. Accordingly, the attention allocation for sys" + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 295, + 96 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 295, + 96 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 295, + 96 + ], + "type": "text", + "content": "tem prompts, visual features, and user instructions in the " + }, + { + "bbox": [ + 55, + 72, + 295, + 96 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 55, + 72, + 295, + 96 + ], + "type": "text", + "content": "-th layer can be computed as follows:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 127, + 106, + 225, + 132 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 106, + 225, + 132 + ], + "spans": [ + { + "bbox": [ + 127, + 106, + 225, + 132 + ], + "type": "interline_equation", + "content": "\\lambda_ {s y s} ^ {l} = \\sum_ {i \\in \\mathcal {T}} \\sum_ {j \\in \\mathcal {S}} A _ {l} (i, j),", + "image_path": "3f193b68d405fb31bda95c7181340682375133c3e305b5f7afe5ebc6ac143304.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 127, + 135, + 295, + 160 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 135, + 295, + 160 + ], + "spans": [ + { + "bbox": [ + 127, + 135, + 295, + 160 + ], + "type": "interline_equation", + "content": "\\lambda_ {v i s} ^ {l} = \\sum_ {i \\in \\mathcal {T}} \\sum_ {j \\in \\mathcal {V}} A _ {l} (i, j), \\tag {6}", + "image_path": "8f13287a325cf42a2c71a691dba2a24b548f39cbbaa373328960b2f00f6ac7b9.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 127, + 163, + 225, + 190 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 163, + 225, + 190 + ], + "spans": [ + { + "bbox": [ + 127, + 163, + 225, + 190 + ], + "type": "interline_equation", + "content": "\\lambda_ {i n s} ^ {l} = \\sum_ {i 
\\in \\mathcal {T}} \\sum_ {j \\in \\mathcal {T}} A _ {l} (i, j).", + "image_path": "bc707136f168b5b624e44e3170f545ffefd0d19ed8a9157308077d53570412be.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 194, + 296, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 194, + 296, + 277 + ], + "spans": [ + { + "bbox": [ + 55, + 194, + 296, + 277 + ], + "type": "text", + "content": "In this context, " + }, + { + "bbox": [ + 55, + 194, + 296, + 277 + ], + "type": "inline_equation", + "content": "A_{l}" + }, + { + "bbox": [ + 55, + 194, + 296, + 277 + ], + "type": "text", + "content": " represents the attention matrix averaged across all attention heads, while " + }, + { + "bbox": [ + 55, + 194, + 296, + 277 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 55, + 194, + 296, + 277 + ], + "type": "text", + "content": " represents the indices of system tokens. The measures " + }, + { + "bbox": [ + 55, + 194, + 296, + 277 + ], + "type": "inline_equation", + "content": "\\lambda_{sys}^{l}, \\lambda_{vis}^{l}" + }, + { + "bbox": [ + 55, + 194, + 296, + 277 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 55, + 194, + 296, + 277 + ], + "type": "inline_equation", + "content": "\\lambda_{ins}^{l}" + }, + { + "bbox": [ + 55, + 194, + 296, + 277 + ], + "type": "text", + "content": " provide insight into the distribution of attention to different modalities across various layers, aiding in understanding the reasons for the underutilization of visual information during the modality fusion process." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 278, + 296, + 387 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 278, + 296, + 387 + ], + "spans": [ + { + "bbox": [ + 55, + 278, + 296, + 387 + ], + "type": "text", + "content": "The experimental setup aligns with that described in Sec. 4.1. Fig. 4 illustrates the allocation of attention to different modalities across the model's layers. In the middle layers, attention to visual features is markedly lower than that given to system prompts and user instructions. This suggests that during the critical process of modality fusion, the model's focus on visual input is insufficient. As a result, visual information is underutilized, leading to an output distribution skewed toward language priors." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 394, + 116, + 407 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 394, + 116, + 407 + ], + "spans": [ + { + "bbox": [ + 55, + 394, + 116, + 407 + ], + "type": "text", + "content": "4.3. Insights" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 412, + 295, + 436 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 412, + 295, + 436 + ], + "spans": [ + { + "bbox": [ + 55, + 412, + 295, + 436 + ], + "type": "text", + "content": "Based on the experimental results presented in Sec. 4.1 and Sec. 
4.2, two significant conclusions can be drawn:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 438, + 295, + 498 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 55, + 438, + 295, + 474 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 438, + 295, + 474 + ], + "spans": [ + { + "bbox": [ + 55, + 438, + 295, + 474 + ], + "type": "text", + "content": "- The model performs the crucial fusion of visual and textual modalities in the middle layers, creating cross-modal semantic representations that drive the final predictions." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 55, + 475, + 295, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 475, + 295, + 498 + ], + "spans": [ + { + "bbox": [ + 55, + 475, + 295, + 498 + ], + "type": "text", + "content": "- During this critical fusion process, the model demonstrates inadequate attention to the visual modality." + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 55, + 500, + 296, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 500, + 296, + 548 + ], + "spans": [ + { + "bbox": [ + 55, + 500, + 296, + 548 + ], + "type": "text", + "content": "These findings indicate that models fail to fully utilize visual information, resulting in an excessive dependence on language priors and, subsequently, the occurrence of hallucination phenomena." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 55, + 559, + 214, + 574 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 559, + 214, + 574 + ], + "spans": [ + { + "bbox": [ + 55, + 559, + 214, + 574 + ], + "type": "text", + "content": "5. Visual Amplification Fusion" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 55, + 579, + 296, + 664 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 579, + 296, + 664 + ], + "spans": [ + { + "bbox": [ + 55, + 579, + 296, + 664 + ], + "type": "text", + "content": "Building on the insights presented in Sec. 4, we introduce a hallucination mitigation method called Visual Amplification Fusion (VAF). As illustrated in Fig. 5, this approach heightens attention to visual information during modality fusion, effectively reducing the excessive dependency on language priors and ensuring that the generated content is closely grounded in visual inputs." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 55, + 672, + 194, + 683 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 672, + 194, + 683 + ], + "spans": [ + { + "bbox": [ + 55, + 672, + 194, + 683 + ], + "type": "text", + "content": "5.1. Attention Redistribution" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 55, + 689, + 296, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 689, + 296, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 689, + 296, + 714 + ], + "type": "text", + "content": "As outlined in Sec. 4, the model performs crucial fusion of visual and textual modalities within the middle layers."
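Looking back at Sec. 4.2, the attention-allocation statistics of Eq. (6) that underlie the Fig. 4 analysis can be sketched the same way; the head-averaged matrix `A_l` and the three index sets are the only inputs, and all names here are illustrative rather than taken from any released code.

```python
import torch

def attention_allocation(A_l, sys_idx, vis_idx, ins_idx):
    """Eq. (6): total attention that instruction rows (set T) pay to the
    system-prompt (S), visual (V) and instruction (T) columns of layer l.

    A_l: (N, N) attention matrix of layer l, already averaged over heads.
    """
    rows = torch.as_tensor(ins_idx)
    lam_sys = A_l[rows][:, torch.as_tensor(sys_idx)].sum()  # lambda_sys^l
    lam_vis = A_l[rows][:, torch.as_tensor(vis_idx)].sum()  # lambda_vis^l
    lam_ins = A_l[rows][:, rows].sum()                      # lambda_ins^l
    return lam_sys.item(), lam_vis.item(), lam_ins.item()
```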
+ } + ] + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 326, + 70, + 541, + 190 + ], + "blocks": [ + { + "bbox": [ + 326, + 70, + 541, + 190 + ], + "lines": [ + { + "bbox": [ + 326, + 70, + 541, + 190 + ], + "spans": [ + { + "bbox": [ + 326, + 70, + 541, + 190 + ], + "type": "image", + "image_path": "a79b173bab09191aa22706ca508242c6b90a6e6be41e3fc5b3e73e81ffc48739.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 193, + 555, + 239 + ], + "lines": [ + { + "bbox": [ + 313, + 193, + 555, + 239 + ], + "spans": [ + { + "bbox": [ + 313, + 193, + 555, + 239 + ], + "type": "text", + "content": "Figure 5. Illustration of the Visual Amplification Fusion Method. In the middle layers, we select attention heads highly responsive to visual information, amplifying their focus on visual features while reducing unnecessary attention to system prompts." + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 256, + 555, + 304 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 256, + 555, + 304 + ], + "spans": [ + { + "bbox": [ + 313, + 256, + 555, + 304 + ], + "type": "text", + "content": "However, the attention allocated to visual modality information during this process remains insufficient. To address this, we adjust the attention weights in these layers to achieve a more balanced focus." + } + ] + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 326, + 311, + 542, + 440 + ], + "blocks": [ + { + "bbox": [ + 326, + 311, + 542, + 440 + ], + "lines": [ + { + "bbox": [ + 326, + 311, + 542, + 440 + ], + "spans": [ + { + "bbox": [ + 326, + 311, + 542, + 440 + ], + "type": "image", + "image_path": "cd4f57fe7e81f9e8b3921d58b7979766e51164cd331cbd4c3783c2b76d61d9c5.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 449, + 555, + 503 + ], + "lines": [ + { + "bbox": [ + 313, + 449, + 555, + 503 + ], + "spans": [ + { + "bbox": [ + 313, + 449, + 555, + 503 + ], + "type": "text", + "content": "Figure 6. Effect of Enhanced Visual Attention on Hallucination Suppression. Increasing attention to visual features in the fusion process of the model's middle layers successfully reduces hallucinations, enabling the model to correct its grape color prediction from \"green\" to \"red\"." 
+ } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_caption" + } + ], + "index": 19 + }, + { + "bbox": [ + 313, + 517, + 554, + 553 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 517, + 554, + 553 + ], + "spans": [ + { + "bbox": [ + 313, + 517, + 554, + 553 + ], + "type": "text", + "content": "Let " + }, + { + "bbox": [ + 313, + 517, + 554, + 553 + ], + "type": "inline_equation", + "content": "A_{l,h}" + }, + { + "bbox": [ + 313, + 517, + 554, + 553 + ], + "type": "text", + "content": " denote the attention matrix of the " + }, + { + "bbox": [ + 313, + 517, + 554, + 553 + ], + "type": "inline_equation", + "content": "h" + }, + { + "bbox": [ + 313, + 517, + 554, + 553 + ], + "type": "text", + "content": "-th attention head in the " + }, + { + "bbox": [ + 313, + 517, + 554, + 553 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 313, + 517, + 554, + 553 + ], + "type": "text", + "content": "-th layer, and " + }, + { + "bbox": [ + 313, + 517, + 554, + 553 + ], + "type": "inline_equation", + "content": "Z_{l,h}" + }, + { + "bbox": [ + 313, + 517, + 554, + 553 + ], + "type": "text", + "content": " represent its corresponding attention score matrix, defined as:" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 384, + 560, + 553, + 573 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 384, + 560, + 553, + 573 + ], + "spans": [ + { + "bbox": [ + 384, + 560, + 553, + 573 + ], + "type": "interline_equation", + "content": "A _ {l, h} = \\operatorname {s o f t m a x} \\left(Z _ {l, h}\\right). \\tag {7}", + "image_path": "b5b433be94eda585d3a7cef671d94c6d0ca234272d9f996e8e718fe76967fb00.jpg" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 313, + 579, + 555, + 663 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 579, + 555, + 663 + ], + "spans": [ + { + "bbox": [ + 313, + 579, + 555, + 663 + ], + "type": "text", + "content": "Our objective during the modality fusion process is to amplify the model's attention to visual features while curbing an overemphasis on system prompts. This adjustment facilitates improved integration of visual information and reduces over-reliance on language priors. To achieve this, we modify the attention score matrix in the middle layers (i.e., " + }, + { + "bbox": [ + 313, + 579, + 555, + 663 + ], + "type": "inline_equation", + "content": "8 < l < 15" + }, + { + "bbox": [ + 313, + 579, + 555, + 663 + ], + "type": "text", + "content": ") as follows:" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 325, + 669, + 553, + 685 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 325, + 669, + 553, + 685 + ], + "spans": [ + { + "bbox": [ + 325, + 669, + 553, + 685 + ], + "type": "interline_equation", + "content": "\\hat {Z} _ {l, h} = Z _ {l, h} + \\alpha \\cdot M _ {l, h} ^ {\\text {e n h}} \\circ Z _ {l, h} - \\beta \\cdot M _ {l, h} ^ {\\text {s u p}} \\circ Z _ {l, h}. 
\\tag {8}", + "image_path": "be5b0e7bdc0f0d54ee5b1d62eccfec0dfcdb504e72b22a3e20bb1da48360402e.jpg" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 313, + 689, + 555, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 689, + 555, + 714 + ], + "spans": [ + { + "bbox": [ + 313, + 689, + 555, + 714 + ], + "type": "text", + "content": "Here, " + }, + { + "bbox": [ + 313, + 689, + 555, + 714 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 313, + 689, + 555, + 714 + ], + "type": "text", + "content": " is the enhancement coefficient (" + }, + { + "bbox": [ + 313, + 689, + 555, + 714 + ], + "type": "inline_equation", + "content": "\\alpha > 0" + }, + { + "bbox": [ + 313, + 689, + 555, + 714 + ], + "type": "text", + "content": "), where larger values indicate stronger amplification of visual attention." + } + ] + } + ], + "index": 25 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 294, + 131 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 294, + 131 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 294, + 131 + ], + "type": "text", + "content": "The suppression coefficient " + }, + { + "bbox": [ + 55, + 72, + 294, + 131 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 55, + 72, + 294, + 131 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 55, + 72, + 294, + 131 + ], + "type": "inline_equation", + "content": "0 < \\beta < 1" + }, + { + "bbox": [ + 55, + 72, + 294, + 131 + ], + "type": "text", + "content": ") determines the extent of attention suppression directed at system prompts. The enhancement and suppression mask matrices, " + }, + { + "bbox": [ + 55, + 72, + 294, + 131 + ], + "type": "inline_equation", + "content": "M_{l,h}^{enh}" + }, + { + "bbox": [ + 55, + 72, + 294, + 131 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 55, + 72, + 294, + 131 + ], + "type": "inline_equation", + "content": "M_{l,h}^{sup}" + }, + { + "bbox": [ + 55, + 72, + 294, + 131 + ], + "type": "text", + "content": " respectively, are defined to guide the modulation of attention elements:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 111, + 137, + 295, + 158 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 137, + 295, + 158 + ], + "spans": [ + { + "bbox": [ + 111, + 137, + 295, + 158 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} M _ {l, h} ^ {e n h} (i, j) = \\mathbb {I} (i \\in \\mathcal {T}, j \\in \\mathcal {V}), \\\\ M _ {l, h} ^ {s u p} (i, j) = \\mathbb {I} (i \\in \\mathcal {T}, j \\in \\mathcal {S}).
\\end{array} \\tag {9}", + "image_path": "c8f13a8898f1f79f023450224eb4720425f24d7272f508eaac9db15c1b1adbef.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 114, + 153, + 238, + 168 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 153, + 238, + 168 + ], + "spans": [ + { + "bbox": [ + 114, + 153, + 238, + 168 + ], + "type": "interline_equation", + "content": "M _ {l, h} ^ {s u p} (i, j) = \\mathbb {I} (i \\in \\mathcal {T}, j \\in \\mathcal {S}).", + "image_path": "0f8cb9e1d9e4977cd45b50bbf47700154d6997e655db5eea76ff1883310ed170.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 175, + 296, + 247 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 175, + 296, + 247 + ], + "spans": [ + { + "bbox": [ + 55, + 175, + 296, + 247 + ], + "type": "text", + "content": "These modifications optimize attention allocation by enhancing the model's focus on visual features during modality fusion and minimizing superfluous attention to system prompts. As illustrated in Fig. 6, preliminary analysis indicates that this approach effectively mitigates hallucination issues by promoting greater attention to visual information." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 253, + 216, + 266 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 253, + 216, + 266 + ], + "spans": [ + { + "bbox": [ + 55, + 253, + 216, + 266 + ], + "type": "text", + "content": "5.2. Visual Perception Restriction" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 270, + 296, + 378 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 270, + 296, + 378 + ], + "spans": [ + { + "bbox": [ + 55, + 270, + 296, + 378 + ], + "type": "text", + "content": "Enhancing visual attention across all attention heads in the middle layers can be overly aggressive and may negatively impact content generation. To address this, we propose a selective enhancement strategy. Specifically, we identify and isolate the attention heads that exhibit higher sensitivity to visual information, which we term visual perception heads. We then restrict the visual attention enhancement to these visual perception heads, ensuring better utilization of visual information while maintaining overall model performance." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 379, + 296, + 462 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 379, + 296, + 462 + ], + "spans": [ + { + "bbox": [ + 55, + 379, + 296, + 462 + ], + "type": "text", + "content": "In the model, attention heads that allocate more attention to visual features demonstrate heightened sensitivity to visual information. 
Let " + }, + { + "bbox": [ + 55, + 379, + 296, + 462 + ], + "type": "inline_equation", + "content": "A_{l,h}" + }, + { + "bbox": [ + 55, + 379, + 296, + 462 + ], + "type": "text", + "content": " represent the attention matrix of the " + }, + { + "bbox": [ + 55, + 379, + 296, + 462 + ], + "type": "inline_equation", + "content": "h" + }, + { + "bbox": [ + 55, + 379, + 296, + 462 + ], + "type": "text", + "content": "-th attention head in the " + }, + { + "bbox": [ + 55, + 379, + 296, + 462 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 55, + 379, + 296, + 462 + ], + "type": "text", + "content": "-th layer of the model, with its corresponding visual attention allocation denoted by " + }, + { + "bbox": [ + 55, + 379, + 296, + 462 + ], + "type": "inline_equation", + "content": "\\lambda_{\\mathrm{vis}}^{l,h}" + }, + { + "bbox": [ + 55, + 379, + 296, + 462 + ], + "type": "text", + "content": ". In each attention layer, we identify the attention heads whose visual attention allocation fall within the top" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 72, + 554, + 109 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 554, + 109 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 554, + 109 + ], + "type": "inline_equation", + "content": "50\\%" + }, + { + "bbox": [ + 313, + 72, + 554, + 109 + ], + "type": "text", + "content": " and designate them as visual perception heads, subsequently redistributing their attention. The attention matrices of the remaining attention heads are kept unchanged." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 119, + 391, + 133 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 119, + 391, + 133 + ], + "spans": [ + { + "bbox": [ + 313, + 119, + 391, + 133 + ], + "type": "text", + "content": "6. Experiment" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 140, + 555, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 140, + 555, + 236 + ], + "spans": [ + { + "bbox": [ + 313, + 140, + 555, + 236 + ], + "type": "text", + "content": "This section demonstrates the effectiveness of the proposed VAF method in mitigating hallucinations. Sec. 6.1 outlines the experimental setup, detailing the evaluation benchmarks and VAF parameter configurations. Sec. 6.2 then presents the experimental results from three perspectives: reduction of hallucinations, coherence of generated content, and inference speed. Finally, Sec. 6.3 further verifies the contribution of each VAF component through ablation studies." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 243, + 441, + 256 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 243, + 441, + 256 + ], + "spans": [ + { + "bbox": [ + 313, + 243, + 441, + 256 + ], + "type": "text", + "content": "6.1. Experimental Settings" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 261, + 554, + 297 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 261, + 554, + 297 + ], + "spans": [ + { + "bbox": [ + 313, + 261, + 554, + 297 + ], + "type": "text", + "content": "In Sec. 6.1.1, we present the selected datasets and evaluation metrics. Sec. 6.1.2 details the chosen MLLM backbone models, and Sec. 6.1.3 outlines the baseline settings." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 304, + 473, + 315 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 304, + 473, + 315 + ], + "spans": [ + { + "bbox": [ + 313, + 304, + 473, + 315 + ], + "type": "text", + "content": "6.1.1. Datasets & Evaluation Metrics" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 319, + 554, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 319, + 554, + 437 + ], + "spans": [ + { + "bbox": [ + 313, + 319, + 554, + 437 + ], + "type": "text", + "content": "Polling-based Object Probing Evaluation (POPE). POPE [30] is a novel framework designed to evaluate object hallucinations in MLLMs. Departing from traditional caption-based approaches, POPE frames hallucination detection as a binary task by posing straightforward yes-or-no questions regarding the presence of specific objects in an image (e.g., \"Is there a chair in the image?\"). Performance on POPE is measured across four metrics: Accuracy, Precision, Recall, and F1 score, allowing for a thorough evaluation of hallucinations in MLLMs." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 437, + 554, + 463 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 437, + 554, + 463 + ], + "spans": [ + { + "bbox": [ + 313, + 437, + 554, + 463 + ], + "type": "text", + "content": "Multimodal Model Evaluation (MME). MME [13] benchmark provides a comprehensive framework for evalu" + } + ] + } + ], + "index": 14 + }, + { + "type": "table", + "bbox": [ + 100, + 479, + 515, + 679 + ], + "blocks": [ + { + "bbox": [ + 100, + 479, + 515, + 679 + ], + "lines": [ + { + "bbox": [ + 100, + 479, + 515, + 679 + ], + "spans": [ + { + "bbox": [ + 100, + 479, + 515, + 679 + ], + "type": "table", + "html": "
<table><tr><td rowspan="2">Category</td><td rowspan="2">Method</td><td colspan="2">LLaVA-v1.5-7B</td><td colspan="2">LLaVA-v1.5-13B</td><td colspan="2">Qwen-VL-Chat-7B</td></tr>
<tr><td>Accuracy</td><td>F1-score</td><td>Accuracy</td><td>F1-score</td><td>Accuracy</td><td>F1-score</td></tr>
<tr><td rowspan="4">Random</td><td>Regular</td><td>87.8↑0.0</td><td>87.5↑0.0</td><td>87.6↑0.0</td><td>87.4↑0.0</td><td>88.2↑0.0</td><td>87.9↑0.0</td></tr>
<tr><td>VCD</td><td>88.4↑0.6</td><td>87.7↑0.2</td><td>88.9↑1.3</td><td>87.8↑0.4</td><td>89.1↑0.9</td><td>88.4↑0.5</td></tr>
<tr><td>ICD</td><td>88.1↑0.3</td><td>87.6↑0.1</td><td>88.1↑0.5</td><td>87.6↑0.2</td><td>88.9↑0.7</td><td>88.1↑0.2</td></tr>
<tr><td>VAF</td><td>89.6↑1.8</td><td>89.3↑1.8</td><td>90.1↑2.5</td><td>89.9↑2.5</td><td>90.0↑1.8</td><td>89.7↑1.8</td></tr>
<tr><td rowspan="4">Popular</td><td>Regular</td><td>82.5↑0.0</td><td>83.2↑0.0</td><td>82.7↑0.0</td><td>84.1↑0.0</td><td>82.4↑0.0</td><td>83.1↑0.0</td></tr>
<tr><td>VCD</td><td>83.1↑0.6</td><td>84.1↑0.9</td><td>83.7↑1.0</td><td>85.1↑1.0</td><td>83.0↑0.6</td><td>84.1↑1.0</td></tr>
<tr><td>ICD</td><td>82.1↓0.4</td><td>82.9↓0.3</td><td>82.9↑0.2</td><td>84.3↑0.2</td><td>83.2↑0.8</td><td>84.5↑1.4</td></tr>
<tr><td>VAF</td><td>84.5↑2.0</td><td>84.9↑1.7</td><td>85.2↑2.5</td><td>86.4↑2.3</td><td>84.9↑2.5</td><td>85.1↑2.0</td></tr>
<tr><td rowspan="4">Adversarial</td><td>Regular</td><td>77.6↑0.0</td><td>79.4↑0.0</td><td>77.8↑0.0</td><td>79.5↑0.0</td><td>77.2↑0.0</td><td>78.9↑0.0</td></tr>
<tr><td>VCD</td><td>78.1↑0.5</td><td>79.6↑0.2</td><td>78.2↑0.4</td><td>79.7↑0.2</td><td>78.8↑1.6</td><td>80.1↑1.2</td></tr>
<tr><td>ICD</td><td>78.5↑0.9</td><td>79.9↑0.5</td><td>79.1↑1.3</td><td>80.1↑0.6</td><td>78.1↑0.9</td><td>79.2↑0.3</td></tr>
<tr><td>VAF</td><td>80.1↑2.5</td><td>81.0↑1.6</td><td>80.7↑2.9</td><td>81.7↑2.2</td><td>80.4↑3.2</td><td>81.2↑2.3</td></tr></table>
", + "image_path": "8b5a7597a40413b0b625d92fa3f609bf4ba008a8e7e335f5c1d5bac48951892a.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "table_body" + } + ], + "index": 15 + }, + { + "bbox": [ + 55, + 687, + 554, + 711 + ], + "lines": [ + { + "bbox": [ + 55, + 687, + 554, + 711 + ], + "spans": [ + { + "bbox": [ + 55, + 687, + 554, + 711 + ], + "type": "text", + "content": "Table 2. Performance on POPE. Results are averaged across the MS-COCO, A-OKVQA, and GQA datasets. The VAF method demonstrates superior hallucination suppression across all three MLLMs. The best performance for each setting is highlighted in red." + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 69, + 70, + 545, + 272 + ], + "blocks": [ + { + "bbox": [ + 69, + 70, + 545, + 272 + ], + "lines": [ + { + "bbox": [ + 69, + 70, + 545, + 272 + ], + "spans": [ + { + "bbox": [ + 69, + 70, + 545, + 272 + ], + "type": "table", + "html": "
<table><tr><td rowspan="2">Model</td><td rowspan="2">Method</td><td colspan="2">Object-level</td><td colspan="2">Attribute-level</td><td rowspan="2">Total Score</td></tr>
<tr><td>Existence</td><td>Count</td><td>Position</td><td>Color</td></tr>
<tr><td rowspan="4">LLaVA-v1.5-7B</td><td>Regular</td><td>185.00↑0.00</td><td>146.67↑0.00</td><td>128.33↑0.00</td><td>150.00↑0.00</td><td>610.00↑0.00</td></tr>
<tr><td>VCD</td><td>185.00↑0.00</td><td>141.33↓5.34</td><td>128.33↑0.00</td><td>153.00↑3.00</td><td>607.66↓2.34</td></tr>
<tr><td>ICD</td><td>185.00↑0.00</td><td>148.33↑1.66</td><td>126.66↓1.67</td><td>148.33↓1.67</td><td>608.32↓1.68</td></tr>
<tr><td>VAF</td><td>195.00↑10.00</td><td>158.33↑11.66</td><td>128.33↑0.00</td><td>155.00↑5.00</td><td>636.67↑26.67</td></tr>
<tr><td rowspan="4">LLaVA-v1.5-13B</td><td>Regular</td><td>185.00↑0.00</td><td>155.00↑0.00</td><td>133.33↑0.00</td><td>165.00↑0.00</td><td>638.33↑0.00</td></tr>
<tr><td>VCD</td><td>185.00↑0.00</td><td>155.00↑0.00</td><td>130.00↓3.33</td><td>168.33↑3.33</td><td>638.33↑0.00</td></tr>
<tr><td>ICD</td><td>183.33↓1.67</td><td>153.33↓1.67</td><td>131.67↓1.66</td><td>165.00↑0.00</td><td>633.33↓5.00</td></tr>
<tr><td>VAF</td><td>195.00↑10.00</td><td>160.00↑5.00</td><td>136.67↑3.34</td><td>170.00↑5.00</td><td>661.67↑23.34</td></tr>
<tr><td rowspan="4">Qwen-VL-7B</td><td>Regular</td><td>158.33↑0.00</td><td>150.00↑0.00</td><td>128.33↑0.00</td><td>170.00↑0.00</td><td>606.66↑0.00</td></tr>
<tr><td>VCD</td><td>158.33↑0.00</td><td>150.00↑0.00</td><td>133.33↑5.00</td><td>175.00↑5.00</td><td>616.66↑10.00</td></tr>
<tr><td>ICD</td><td>128.33↓30.00</td><td>151.67↑1.67</td><td>128.33↑0.00</td><td>170.00↑0.00</td><td>578.33↓28.33</td></tr>
<tr><td>VAF</td><td>165.00↑6.67</td><td>155.00↑5.00</td><td>133.33↑5.00</td><td>175.00↑5.00</td><td>628.33↑21.67</td></tr></table>
", + "image_path": "d5ad6983b6128eeec1e4e9721d200c6bac668bca9bea7e5ef70ec2c07258095b.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 279, + 555, + 304 + ], + "lines": [ + { + "bbox": [ + 55, + 279, + 555, + 304 + ], + "spans": [ + { + "bbox": [ + 55, + 279, + 555, + 304 + ], + "type": "text", + "content": "Table 3. Results on the MME subset. Across three MLLMs, the VAF method achieved the most effective suppression of both object-level and attribute-level hallucinations. The highest scores in each setting are highlighted in red." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 54, + 323, + 294, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 323, + 294, + 418 + ], + "spans": [ + { + "bbox": [ + 54, + 323, + 294, + 418 + ], + "type": "text", + "content": "ating MLLMs across both perceptual and cognitive dimensions. It consists of ten perception-oriented tasks and four cognition-oriented tasks, with model performance assessed through accuracy metrics. In addition to the full dataset, we leverage specific subsets, such as object existence and counting to analyze object-level hallucinations, while position and color subsets are employed to examine attribute-level hallucinations." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 54, + 418, + 295, + 503 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 418, + 295, + 503 + ], + "spans": [ + { + "bbox": [ + 54, + 418, + 295, + 503 + ], + "type": "text", + "content": "Novel Object Captioning at Scale (Nocaps). NoCaps [3] benchmark is designed to evaluate image captioning models on their ability to describe novel objects absent from standard datasets like COCO. Model performance is quantified using the CIDEr score, providing a basis to assess the coherence and accuracy of generated captions in response to images containing unfamiliar objects." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 510, + 165, + 521 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 510, + 165, + 521 + ], + "spans": [ + { + "bbox": [ + 55, + 510, + 165, + 521 + ], + "type": "text", + "content": "6.1.2. MLLM Backbones" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 54, + 524, + 295, + 656 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 524, + 295, + 656 + ], + "spans": [ + { + "bbox": [ + 54, + 524, + 295, + 656 + ], + "type": "text", + "content": "In comparison to the Q-former structure, linear projection demonstrates greater efficiency in aligning visual and textual features. This advantage is evident in MLLMs with linear projection architectures, such as LLaVA and Qwen-VL, which outperform Q-former-based MLLMs like Instruct-BLIP and MiniGPT4. Based on these findings, we selected three linear-projection-based MLLMs, specifically LLaVA-v1.5-7B, LLaVA-v1.5-13B [35], and Qwen-VL-7B [5], to evaluate the effectiveness of our proposed VAF method. Detailed prompt templates for each model across various benchmarks are included in Sec. 10." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 662, + 159, + 675 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 662, + 159, + 675 + ], + "spans": [ + { + "bbox": [ + 55, + 662, + 159, + 675 + ], + "type": "text", + "content": "6.1.3. Baseline Settings." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 677, + 296, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 677, + 296, + 715 + ], + "spans": [ + { + "bbox": [ + 55, + 677, + 296, + 715 + ], + "type": "text", + "content": "We primarily compared our approach to the VCD [23] and ICD [47] methods. VCD mitigates hallucinations by contrasting output distributions derived from original and dis" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 323, + 555, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 323, + 555, + 396 + ], + "spans": [ + { + "bbox": [ + 313, + 323, + 555, + 396 + ], + "type": "text", + "content": "torted visual inputs, while ICD reduces hallucinated concepts by comparing distributions generated with standard versus disrupted instructions. To ensure consistency and reproducibility in our comparisons, all methods use greedy search. Unless specified otherwise, our experiments set " + }, + { + "bbox": [ + 313, + 323, + 555, + 396 + ], + "type": "inline_equation", + "content": "\\beta = 0.1" + }, + { + "bbox": [ + 313, + 323, + 555, + 396 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 323, + 555, + 396 + ], + "type": "inline_equation", + "content": "\\alpha = 0.15" + }, + { + "bbox": [ + 313, + 323, + 555, + 396 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 403, + 434, + 415 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 403, + 434, + 415 + ], + "spans": [ + { + "bbox": [ + 313, + 403, + 434, + 415 + ], + "type": "text", + "content": "6.2. Results and Analysis" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 420, + 555, + 481 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 420, + 555, + 481 + ], + "spans": [ + { + "bbox": [ + 313, + 420, + 555, + 481 + ], + "type": "text", + "content": "Sec. 6.2.1 examines the effectiveness of various methods in mitigating hallucinations, while Sec. 6.2.2 assesses their impact on the quality of generated content. Sec. 6.2.3 then analyzes the influence of each method on inference speed. Additional experimental results are provided in Sec. 8." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 486, + 448, + 498 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 486, + 448, + 498 + ], + "spans": [ + { + "bbox": [ + 313, + 486, + 448, + 498 + ], + "type": "text", + "content": "6.2.1. Hallucination Mitigation" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 501, + 554, + 610 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 501, + 554, + 610 + ], + "spans": [ + { + "bbox": [ + 313, + 501, + 554, + 610 + ], + "type": "text", + "content": "Tab. 2 presents the experimental results of the VAF method on the POPE benchmark, with results averaged across the MSCOCO [31], A-OKVQA [42], and GQA [20] datasets. Applied to both the LLaVA-v1.5 model family and the Qwen-VL model, the VAF method consistently surpasses the VCD and ICD methods in reducing hallucinations. Tab. 3 further highlights the performance of VAF on the MME benchmark, demonstrating its effectiveness in suppressing both object-level and attribute-level hallucinations." 
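For reference, POPE's four scores are just standard binary-classification metrics over the yes/no answers; the helper below is a sketch of that standard computation, not code from the POPE release.

```python
def pope_metrics(preds, labels):
    """Accuracy/Precision/Recall/F1 for POPE-style answers, where
    'yes' means the queried object is predicted to be present."""
    pairs = list(zip(preds, labels))
    tp = sum(p == "yes" and y == "yes" for p, y in pairs)  # true positives
    fp = sum(p == "yes" and y == "no" for p, y in pairs)   # hallucinated objects
    fn = sum(p == "no" and y == "yes" for p, y in pairs)   # missed objects
    accuracy = sum(p == y for p, y in pairs) / len(pairs)
    precision = tp / (tp + fp) if tp + fp else 0.0
    recall = tp / (tp + fn) if tp + fn else 0.0
    f1 = 2 * precision * recall / (precision + recall) if precision + recall else 0.0
    return accuracy, precision, recall, f1
```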
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 615, + 484, + 625 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 615, + 484, + 625 + ], + "spans": [ + { + "bbox": [ + 313, + 615, + 484, + 625 + ], + "type": "text", + "content": "6.2.2. Coherence of Generated Content" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 629, + 555, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 629, + 555, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 629, + 555, + 715 + ], + "type": "text", + "content": "Tab. 4 presents the experimental results for various methods on the Nocaps and ScienceQA datasets. It is evident that VCD and ICD substantially degrade the quality of the generated content. Specifically, on the Nocaps dataset, VCD and ICD reduce CIDEr scores by " + }, + { + "bbox": [ + 313, + 629, + 555, + 715 + ], + "type": "inline_equation", + "content": "18\\%" + }, + { + "bbox": [ + 313, + 629, + 555, + 715 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 629, + 555, + 715 + ], + "type": "inline_equation", + "content": "27\\%" + }, + { + "bbox": [ + 313, + 629, + 555, + 715 + ], + "type": "text", + "content": ", respectively. This degradation primarily arises from the crude disruption of language priors by contrastive decoding methods," + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 62, + 71, + 293, + 218 + ], + "blocks": [ + { + "bbox": [ + 62, + 71, + 293, + 218 + ], + "lines": [ + { + "bbox": [ + 62, + 71, + 293, + 218 + ], + "spans": [ + { + "bbox": [ + 62, + 71, + 293, + 218 + ], + "type": "table", + "html": "
<table><tr><td rowspan="2">Model</td><td rowspan="2">Decoding</td><td>ScienceQA</td><td>Nocaps</td></tr>
<tr><td>Accuracy</td><td>CIDEr</td></tr>
<tr><td rowspan="4">LLaVA-v1.5-7B</td><td>Regular</td><td>68.0</td><td>78.7</td></tr>
<tr><td>VCD</td><td>64.5</td><td>65.7</td></tr>
<tr><td>ICD</td><td>62.4</td><td>62.3</td></tr>
<tr><td>VAF</td><td>68.5</td><td>78.8</td></tr>
<tr><td rowspan="4">LLaVA-v1.5-13B</td><td>Regular</td><td>71.6</td><td>82.6</td></tr>
<tr><td>VCD</td><td>70.0</td><td>68.9</td></tr>
<tr><td>ICD</td><td>69.2</td><td>60.3</td></tr>
<tr><td>VAF</td><td>71.7</td><td>82.3</td></tr></table>
", + "image_path": "ed04285b06690a6f4f7556e8adf2d935197d4d1b0fc3e108bbbb0426666d9aa5.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 270, + 295, + 319 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 270, + 295, + 319 + ], + "spans": [ + { + "bbox": [ + 55, + 270, + 295, + 319 + ], + "type": "text", + "content": "which leads to generated content lacking coherence and accuracy. By contrast, our method demonstrates minimal negative impact on prediction results, maintaining both coherence and accuracy effectively." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 324, + 153, + 337 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 324, + 153, + 337 + ], + "spans": [ + { + "bbox": [ + 55, + 324, + 153, + 337 + ], + "type": "text", + "content": "6.2.3. Inference Speed" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 339, + 296, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 339, + 296, + 411 + ], + "spans": [ + { + "bbox": [ + 55, + 339, + 296, + 411 + ], + "type": "text", + "content": "Fig. 7 illustrates the impact of different strategies on model inference speed within the Nocaps dataset. In comparison, the VCD and ICD methods nearly double the inference time due to the need to process contrastive input samples, whereas the VAF method has minimal impact on the inference speed of multimodal large language models." + } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 69, + 426, + 271, + 566 + ], + "blocks": [ + { + "bbox": [ + 69, + 426, + 271, + 566 + ], + "lines": [ + { + "bbox": [ + 69, + 426, + 271, + 566 + ], + "spans": [ + { + "bbox": [ + 69, + 426, + 271, + 566 + ], + "type": "image", + "image_path": "a52934bd49c85c2eeb682f6e85fd76b33397436a158a1df077b2d7eddeb2b2c9.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 573, + 295, + 608 + ], + "lines": [ + { + "bbox": [ + 55, + 573, + 295, + 608 + ], + "spans": [ + { + "bbox": [ + 55, + 573, + 295, + 608 + ], + "type": "text", + "content": "Figure 7. Comparison of different strategies on inference speed. The VCD and ICD methods reduce inference speed by " + }, + { + "bbox": [ + 55, + 573, + 295, + 608 + ], + "type": "inline_equation", + "content": "50\\%" + }, + { + "bbox": [ + 55, + 573, + 295, + 608 + ], + "type": "text", + "content": ", whereas the VAF method shows minimal impact." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 624, + 149, + 637 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 624, + 149, + 637 + ], + "spans": [ + { + "bbox": [ + 55, + 624, + 149, + 637 + ], + "type": "text", + "content": "6.3. Ablation Study" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 642, + 296, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 642, + 296, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 642, + 296, + 714 + ], + "type": "text", + "content": "Ablation studies on the enhancement coefficient " + }, + { + "bbox": [ + 55, + 642, + 296, + 714 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 55, + 642, + 296, + 714 + ], + "type": "text", + "content": " were conducted using the COCO-Random dataset within the POPE benchmark to understand its influence on model performance. Fig. 
8 demonstrates that when " + }, + { + "bbox": [ + 55, + 642, + 296, + 714 + ], + "type": "inline_equation", + "content": "0 < \\alpha < 0.25" + }, + { + "bbox": [ + 55, + 642, + 296, + 714 + ], + "type": "text", + "content": ", model hallucinations are effectively suppressed. However, when " + }, + { + "bbox": [ + 55, + 642, + 296, + 714 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 55, + 642, + 296, + 714 + ], + "type": "text", + "content": " surpasses 0.25, performance starts to degrade. We" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 72, + 554, + 120 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 554, + 120 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 554, + 120 + ], + "type": "text", + "content": "propose that this reduction in performance may stem from an excessive focus on visual features, disrupting the balanced integration of language information and diminishing overall model effectiveness." + } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 329, + 129, + 538, + 271 + ], + "blocks": [ + { + "bbox": [ + 329, + 129, + 538, + 271 + ], + "lines": [ + { + "bbox": [ + 329, + 129, + 538, + 271 + ], + "spans": [ + { + "bbox": [ + 329, + 129, + 538, + 271 + ], + "type": "image", + "image_path": "43f9cde0505695001d35da1fe6336854987e8cb1c5daf157a6cf91f7c5f9b531.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 327, + 279, + 541, + 291 + ], + "lines": [ + { + "bbox": [ + 327, + 279, + 541, + 291 + ], + "spans": [ + { + "bbox": [ + 327, + 279, + 541, + 291 + ], + "type": "text", + "content": "Figure 8. Ablation study of " + }, + { + "bbox": [ + 327, + 279, + 541, + 291 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 327, + 279, + 541, + 291 + ], + "type": "text", + "content": " on the POPE benchmark." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 294, + 555, + 390 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 294, + 555, + 390 + ], + "spans": [ + { + "bbox": [ + 313, + 294, + 555, + 390 + ], + "type": "text", + "content": "We performed ablation studies on the visual perception restriction mechanism, evaluating its impact on the POPE and Nocaps benchmarks. Tab. 5 highlights the effects of restricting attention reallocation to visual perception heads. Increasing attention to visual features alone reduces model hallucinations, while confining this reallocation strategy to visual perception heads minimizes adverse effects on content quality. More ablation studies can be found in Sec. 9." + } + ] + } + ], + "index": 12 + }, + { + "type": "table", + "bbox": [ + 323, + 399, + 547, + 482 + ], + "blocks": [ + { + "bbox": [ + 55, + 227, + 295, + 251 + ], + "lines": [ + { + "bbox": [ + 55, + 227, + 295, + 251 + ], + "spans": [ + { + "bbox": [ + 55, + 227, + 295, + 251 + ], + "type": "text", + "content": "Table 4. Results on SQA and Nocaps datasets. The highest and second-highest scores are marked in red and blue, respectively." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 323, + 399, + 547, + 482 + ], + "lines": [ + { + "bbox": [ + 323, + 399, + 547, + 482 + ], + "spans": [ + { + "bbox": [ + 323, + 399, + 547, + 482 + ], + "type": "table", + "html": "
<table><tr><td>Model</td><td>Visual Restriction</td><td>POPE</td><td>Nocaps</td></tr>
<tr><td rowspan="2">LLaVA-7B</td><td>✓</td><td>89.8</td><td>78.8</td></tr>
<tr><td>X</td><td>89.9</td><td>76.4</td></tr>
<tr><td rowspan="2">LLaVA-13B</td><td>✓</td><td>90.2</td><td>82.3</td></tr>
<tr><td>X</td><td>90.0</td><td>81.1</td></tr></table>
", + "image_path": "df3d04f20204c760b313ec2c84457e3343648bd94b9f015e7e1ca4671fe51867.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "table_body" + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 491, + 554, + 525 + ], + "lines": [ + { + "bbox": [ + 313, + 491, + 554, + 525 + ], + "spans": [ + { + "bbox": [ + 313, + 491, + 554, + 525 + ], + "type": "text", + "content": "Table 5. Ablation Study of Visual Perception Restriction Mechanism. Restricting attention redistribution to the visual perception heads more effectively preserves the quality of generated content." + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 314, + 538, + 384, + 550 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 538, + 384, + 550 + ], + "spans": [ + { + "bbox": [ + 314, + 538, + 384, + 550 + ], + "type": "text", + "content": "7. conclusion" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 558, + 555, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 558, + 555, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 558, + 555, + 713 + ], + "type": "text", + "content": "In this paper, we identify two key drawbacks of using contrastive decoding to mitigate hallucinations in MLLMs: reduced quality of generated content and slower inference speed. To address these challenges, we propose a novel approach, Visual Amplification Fusion, which effectively mitigates hallucinations while preserving both inference speed and content generation quality. By enhancing the attention to visual features during modality fusion, VAF minimizes the over-reliance on language priors, ensuring a high degree of consistency between generated content and visual inputs. Extensive experiments across multiple benchmarks and MLLMs demonstrate that VAF provides a clear advantage in hallucination mitigation." + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 72, + 158, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 72, + 158, + 85 + ], + "spans": [ + { + "bbox": [ + 56, + 72, + 158, + 85 + ], + "type": "text", + "content": "Acknowledgements" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 91, + 296, + 163 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 91, + 296, + 163 + ], + "spans": [ + { + "bbox": [ + 55, + 91, + 296, + 163 + ], + "type": "text", + "content": "This work is supported by the National Natural Science Foundation of China under Grant 62176246. This work is also supported by Anhui Province Key Research and Development Plan (202304a05020045), Anhui Province Natural Science Foundation (2208085UD17) and National Natural Science Foundation of China under Grant 62406098." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 172, + 115, + 185 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 172, + 115, + 185 + ], + "spans": [ + { + "bbox": [ + 56, + 172, + 115, + 185 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 57, + 193, + 296, + 713 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 61, + 193, + 296, + 247 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 193, + 296, + 247 + ], + "spans": [ + { + "bbox": [ + 61, + 193, + 296, + 247 + ], + "type": "text", + "content": "[1] Vedika Agarwal, Rakshith Shetty, and Mario Fritz. Towards causal vqa: Revealing and reducing spurious correlations by invariant and covariant semantic editing. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9690-9698, 2020. 1" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 62, + 248, + 296, + 280 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 248, + 296, + 280 + ], + "spans": [ + { + "bbox": [ + 62, + 248, + 296, + 280 + ], + "type": "text", + "content": "[2] Aishwarya Agrawal, Dhruv Batra, and Devi Parikh. Analyzing the behavior of visual question answering models. arXiv preprint arXiv:1606.07356, 2016. 1" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 62, + 282, + 295, + 335 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 282, + 295, + 335 + ], + "spans": [ + { + "bbox": [ + 62, + 282, + 295, + 335 + ], + "type": "text", + "content": "[3] Harsh Agrawal, Karan Desai, Yufei Wang, Xinlei Chen, Rishabh Jain, Mark Johnson, Dhruv Batra, Devi Parikh, Stefan Lee, and Peter Anderson. nocaps: novel object captioning at scale. In 2019 IEEE/CVF International Conference on Computer Vision (ICCV). IEEE, 2019. 7" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 62, + 337, + 295, + 357 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 337, + 295, + 357 + ], + "spans": [ + { + "bbox": [ + 62, + 337, + 295, + 357 + ], + "type": "text", + "content": "[4] Jinze Bai, Shuai Bai, and et al. Qwen technical report. arXiv preprint arXiv:2309.16609, 2023. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 62, + 359, + 295, + 391 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 359, + 295, + 391 + ], + "spans": [ + { + "bbox": [ + 62, + 359, + 295, + 391 + ], + "type": "text", + "content": "[5] Jinze Bai, Shuai Bai, and et al. Qwen-vl: A frontier large vision-language model with versatile abilities. arXiv preprint arXiv:2308.12966, 2023. 2, 7" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 62, + 393, + 294, + 413 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 393, + 294, + 413 + ], + "spans": [ + { + "bbox": [ + 62, + 393, + 294, + 413 + ], + "type": "text", + "content": "[6] Rohan Bavishi, Erich Elsen, and et al. Introducing our multimodal models, 2023. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 62, + 415, + 295, + 469 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 415, + 295, + 469 + ], + "spans": [ + { + "bbox": [ + 62, + 415, + 295, + 469 + ], + "type": "text", + "content": "[7] Ali Furkan Biten, Lluís Gómez, and Dimosthenis Karatzas. Let there be a clock on the beach: Reducing object hallucination in image captioning. 
In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 1381-1390, 2022. 1" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 62, + 471, + 295, + 502 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 471, + 295, + 502 + ], + "spans": [ + { + "bbox": [ + 62, + 471, + 295, + 502 + ], + "type": "text", + "content": "[8] Keqin Chen, Zhao Zhang, and et al. Shikra: Unleashing multimodal llm's referential dialogue magic. arXiv preprint arXiv:2306.15195, 2023. 1" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 62, + 504, + 295, + 557 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 504, + 295, + 557 + ], + "spans": [ + { + "bbox": [ + 62, + 504, + 295, + 557 + ], + "type": "text", + "content": "[9] Long Chen, Oleg Sinavski, Jan Hunermann, Alice Karnsund, Andrew James Willmott, Danny Birch, Daniel Maund, and Jamie Shotton. Driving with llms: Fusing object-level vector modality for explainable autonomous driving. arXiv preprint arXiv:2310.01957, 2023. 1" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 57, + 559, + 295, + 601 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 559, + 295, + 601 + ], + "spans": [ + { + "bbox": [ + 57, + 559, + 295, + 601 + ], + "type": "text", + "content": "[10] Zhe Chen, Weiyun Wang, and et al. How far are we to gpt-4v? closing the gap to commercial multimodal models with open-source suites. arXiv preprint arXiv:2404.16821, 2024. 2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 57, + 603, + 295, + 645 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 603, + 295, + 645 + ], + "spans": [ + { + "bbox": [ + 57, + 603, + 295, + 645 + ], + "type": "text", + "content": "[11] Wei-Lin Chiang, Zhuohan Li, and et al. Vicuna: An open-source chatbot impressing gpt-4 with " + }, + { + "bbox": [ + 57, + 603, + 295, + 645 + ], + "type": "inline_equation", + "content": "90\\%" + }, + { + "bbox": [ + 57, + 603, + 295, + 645 + ], + "type": "text", + "content": " chatgpt quality. See https://vicuna.lmsys.org (accessed 14 April 2023), 2023. 2" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 57, + 647, + 295, + 679 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 647, + 295, + 679 + ], + "spans": [ + { + "bbox": [ + 57, + 647, + 295, + 679 + ], + "type": "text", + "content": "[12] Wenliang Dai and Junnan Li et al. Instructblip: Towards general-purpose vision-language models with instruction tuning, 2023. 2" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 57, + 681, + 295, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 681, + 295, + 713 + ], + "spans": [ + { + "bbox": [ + 57, + 681, + 295, + 713 + ], + "type": "text", + "content": "[13] Chaoyou Fu, Peixian Chen, and et al. Mme: A comprehensive evaluation benchmark for multimodal large language models. arXiv preprint arXiv:2306.13394, 2023. 6" + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 73, + 554, + 713 + ], + "type": "list", + "angle": 0, + "index": 32, + "blocks": [ + { + "bbox": [ + 316, + 73, + 554, + 128 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 73, + 554, + 128 + ], + "spans": [ + { + "bbox": [ + 316, + 73, + 554, + 128 + ], + "type": "text", + "content": "[14] Yash Goyal, Tejas Khot, Douglas Summers-Stay, Dhruv Batra, and Devi Parikh. 
Making the v in vqa matter: Elevating the role of image understanding in visual question answering. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 6904-6913, 2017. 1" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 317, + 129, + 553, + 162 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 129, + 553, + 162 + ], + "spans": [ + { + "bbox": [ + 317, + 129, + 553, + 162 + ], + "type": "text", + "content": "[15] Anisha Gunjal, Jihan Yin, and Erhan Bas. Detecting and preventing hallucinations in large vision language models. arXiv preprint arXiv:2308.06394, 2023. 1" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 317, + 163, + 553, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 163, + 553, + 228 + ], + "spans": [ + { + "bbox": [ + 317, + 163, + 553, + 228 + ], + "type": "text", + "content": "[16] Vipul Gupta, Zhuowan Li, Adam Kortylewski, Chenyu Zhang, Yingwei Li, and Alan Yuille. Swapmix: Diagnosing and regularizing the over-reliance on visual context in visual question answering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5078-5088, 2022. 1" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 317, + 230, + 553, + 273 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 230, + 553, + 273 + ], + "spans": [ + { + "bbox": [ + 317, + 230, + 553, + 273 + ], + "type": "text", + "content": "[17] Yudong Han, Liqiang Nie, Jianhua Yin, Jianlong Wu, and Yan Yan. Visual perturbation-aware collaborative learning for overcoming the language prior problem. arXiv preprint arXiv:2207.11850, 2022. 1" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 317, + 275, + 553, + 317 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 275, + 553, + 317 + ], + "spans": [ + { + "bbox": [ + 317, + 275, + 553, + 317 + ], + "type": "text", + "content": "[18] Mingzhe Hu, Shaoyan Pan, Yuheng Li, and Xiaofeng Yang. Advancing medical imaging with language models: A journey from n-grams to chatgpt. arXiv preprint arXiv:2304.04920, 2023. 1" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 317, + 319, + 553, + 384 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 319, + 553, + 384 + ], + "spans": [ + { + "bbox": [ + 317, + 319, + 553, + 384 + ], + "type": "text", + "content": "[19] Qidong Huang, Xiaoyi Dong, Pan Zhang, Bin Wang, Conghui He, Jiaqi Wang, Dahua Lin, Weiming Zhang, and Nenghai Yu. Opera: Alleviating hallucination in multimodal large language models via over-trust penalty and retrospection-allocation. In CVPR, pages 13418-13427, 2024. 2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 316, + 387, + 553, + 419 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 387, + 553, + 419 + ], + "spans": [ + { + "bbox": [ + 316, + 387, + 553, + 419 + ], + "type": "text", + "content": "[20] Drew A Hudson and Christopher D Manning. Gqa: A new dataset for real-world visual reasoning and compositional question answering. In CVPR, pages 6700-6709, 2019. 7" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 317, + 421, + 553, + 463 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 421, + 553, + 463 + ], + "spans": [ + { + "bbox": [ + 317, + 421, + 553, + 463 + ], + "type": "text", + "content": "[21] Fushuo Huo, Wenchao Xu, Zhong Zhang, Haozhao Wang, Zhicheng Chen, and Peilin Zhao. 
Self-introspective decoding: Alleviating hallucinations for large vision-language models, 2024. 2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 317, + 465, + 553, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 465, + 553, + 498 + ], + "spans": [ + { + "bbox": [ + 317, + 465, + 553, + 498 + ], + "type": "text", + "content": "[22] Chaoya Jiang, Haiyang Xu, and et al. Hallucination augmented contrastive learning for multimodal large language model. In CVPR, pages 27036-27046, 2024. 2" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 317, + 499, + 553, + 542 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 499, + 553, + 542 + ], + "spans": [ + { + "bbox": [ + 317, + 499, + 553, + 542 + ], + "type": "text", + "content": "[23] Sicong Leng, Hang Zhang, and et al. Mitigating object hallucinations in large vision-language models through visual contrastive decoding. In CVPR, pages 13872-13882, 2024. 2, 7" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 317, + 544, + 553, + 576 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 544, + 553, + 576 + ], + "spans": [ + { + "bbox": [ + 317, + 544, + 553, + 576 + ], + "type": "text", + "content": "[24] Bo Li, Yuanhan Zhang, and et al. Mimic-it: Multi-modal in-context instruction tuning. arXiv preprint arXiv:2306.05425, 2023. 2" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 317, + 578, + 553, + 600 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 578, + 553, + 600 + ], + "spans": [ + { + "bbox": [ + 317, + 578, + 553, + 600 + ], + "type": "text", + "content": "[25] Bo Li, Kaichen Zhang, and et al. Llava next: Stronger llms supercharge multimodal capabilities in the wild, 2024. 2" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 317, + 601, + 553, + 634 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 601, + 553, + 634 + ], + "spans": [ + { + "bbox": [ + 317, + 601, + 553, + 634 + ], + "type": "text", + "content": "[26] Chunyuan Li, Cliff Wong, and et al. Llava-med: Training a large language-and-vision assistant for biomedicine in one day. In NeurIPS, pages 28541-28564, 2023. 2" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 317, + 635, + 553, + 678 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 635, + 553, + 678 + ], + "spans": [ + { + "bbox": [ + 317, + 635, + 553, + 678 + ], + "type": "text", + "content": "[27] Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. BLIP-2: Bootstrapping language-image pre-training with frozen image encoders and large language models. In ICML, 2023. 1, 2" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 316, + 680, + 553, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 680, + 553, + 713 + ], + "spans": [ + { + "bbox": [ + 316, + 680, + 553, + 713 + ], + "type": "text", + "content": "[28] Xiang Lisa Li, Ari Holtzman, and et al. Contrastive decoding: Open-ended text generation as optimization. arXiv preprint arXiv:2210.15097, 2022. 
2" + } + ] + } + ], + "index": 31 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 72, + 294, + 714 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 56, + 72, + 294, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 72, + 294, + 116 + ], + "spans": [ + { + "bbox": [ + 56, + 72, + 294, + 116 + ], + "type": "text", + "content": "[29] Yifan Li, Yifan Du, Kun Zhou, Jinpeng Wang, Wayne Xin Zhao, and Ji-Rong Wen. Evaluating object hallucination in large vision-language models. arXiv preprint arXiv:2305.10355, 2023. 1" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 118, + 294, + 163 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 118, + 294, + 163 + ], + "spans": [ + { + "bbox": [ + 56, + 118, + 294, + 163 + ], + "type": "text", + "content": "[30] Yifan Li, Yifan Du, Kun Zhou, Jinpeng Wang, Xin Zhao, and Ji-Rong Wen. Evaluating object hallucination in large vision-language models. In EMNLP, pages 292-305, 2023. 6" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 165, + 294, + 196 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 165, + 294, + 196 + ], + "spans": [ + { + "bbox": [ + 56, + 165, + 294, + 196 + ], + "type": "text", + "content": "[31] Tsung-Yi Lin, Michael Maire, and et al. Microsoft coco: Common objects in context. In ECCV, pages 740-755, 2014. 7" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 199, + 294, + 243 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 199, + 294, + 243 + ], + "spans": [ + { + "bbox": [ + 56, + 199, + 294, + 243 + ], + "type": "text", + "content": "[32] Fuxiao Liu, Kevin Lin, Linjie Li, Jianfeng Wang, Yaser Ya-coob, and Lijuan Wang. Mitigating hallucination in large multi-modal models via robust instruction tuning. arXiv preprint arXiv:2306.14565, 2023. 1" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 245, + 294, + 277 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 245, + 294, + 277 + ], + "spans": [ + { + "bbox": [ + 56, + 245, + 294, + 277 + ], + "type": "text", + "content": "[33] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. In NeurIPS, pages 34892-34916, 2023. 1, 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 280, + 294, + 323 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 280, + 294, + 323 + ], + "spans": [ + { + "bbox": [ + 56, + 280, + 294, + 323 + ], + "type": "text", + "content": "[34] Haokun Liu, Yaonan Zhu, Kenji Kato, Izumi Kondo, Tadayoshi Aoyama, and Yasuhisa Hasegawa. Lm-based human-robot collaboration framework for manipulation tasks. arXiv preprint arXiv:2308.14972, 2023. 1" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 326, + 294, + 358 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 326, + 294, + 358 + ], + "spans": [ + { + "bbox": [ + 56, + 326, + 294, + 358 + ], + "type": "text", + "content": "[35] Haotian Liu, Chunyuan Li, Yuheng Li, and Yong Jae Lee. Improved baselines with visual instruction tuning. In CVPR, pages 26296-26306, 2024. 
1, 7" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 361, + 294, + 404 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 361, + 294, + 404 + ], + "spans": [ + { + "bbox": [ + 56, + 361, + 294, + 404 + ], + "type": "text", + "content": "[36] Zhi-Song Liu, Robin Courant, and Vicky Kalogeiton. Funnynet-w: Multimodal learning of funny moments in videos in the wild. International Journal of Computer Vision, pages 1-22, 2024. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 406, + 294, + 450 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 406, + 294, + 450 + ], + "spans": [ + { + "bbox": [ + 56, + 406, + 294, + 450 + ], + "type": "text", + "content": "[37] Holy Lvenia, Wenliang Dai, Samuel Cahyawijaya, Ziwei Ji, and Pascale Fung. Negative object presence evaluation (nope) to measure object hallucination in vision-language models. arXiv preprint arXiv:2310.05338, 2023. 1" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 452, + 294, + 517 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 452, + 294, + 517 + ], + "spans": [ + { + "bbox": [ + 56, + 452, + 294, + 517 + ], + "type": "text", + "content": "[38] Pan Lu, Swaroop Mishra, Tony Xia, Liang Qiu, Kai-Wei Chang, Song-Chun Zhu, Oyvind Tafjord, Peter Clark, and Ashwin Kalyan. Learn to explain: Multimodal reasoning via thought chains for science question answering. In The 36th Conference on Neural Information Processing Systems (NeurIPS), 2022. 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 519, + 294, + 563 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 519, + 294, + 563 + ], + "spans": [ + { + "bbox": [ + 56, + 519, + 294, + 563 + ], + "type": "text", + "content": "[39] Jinjie Mai, Jun Chen, Bing Li, Guocheng Qian, Mohamed Elhoseiny, and Bernard Ghanem. Llm as a robotic brain: Unifying egocentric memory and control. arXiv preprint arXiv:2304.09349, 2023. 1" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 565, + 294, + 586 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 565, + 294, + 586 + ], + "spans": [ + { + "bbox": [ + 56, + 565, + 294, + 586 + ], + "type": "text", + "content": "[40] AI Meta. Introducing meta llama 3: The most capable openly available llm to date. Meta AI, 2024. 2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 56, + 589, + 294, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 589, + 294, + 643 + ], + "spans": [ + { + "bbox": [ + 56, + 589, + 294, + 643 + ], + "type": "text", + "content": "[41] Yulei Niu, Kaihua Tang, Hanwang Zhang, Zhiwu Lu, XianSheng Hua, and Ji-Rong Wen. Counterfactual vqa: A cause-effect look at language bias. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12700-12710, 2021. 1" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 56, + 646, + 294, + 678 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 646, + 294, + 678 + ], + "spans": [ + { + "bbox": [ + 56, + 646, + 294, + 678 + ], + "type": "text", + "content": "[42] Dustin Schwenk, Apoorv Khandelwal, and et al. A-okvqa: A benchmark for visual question answering using world knowledge. In ECCV, pages 146–162, 2022. 
7" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 56, + 680, + 294, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 680, + 294, + 714 + ], + "spans": [ + { + "bbox": [ + 56, + 680, + 294, + 714 + ], + "type": "text", + "content": "[43] Rohan Taori, Ishaan Gulrajani, and et al. Stanford alpaca: an instruction-following llama model (2023). URL https://github.com/tatsu-lab/stanford_alpaca, 1(9), 2023. 2" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 315, + 72, + 554, + 555 + ], + "type": "list", + "angle": 0, + "index": 28, + "blocks": [ + { + "bbox": [ + 315, + 72, + 554, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 72, + 554, + 106 + ], + "spans": [ + { + "bbox": [ + 315, + 72, + 554, + 106 + ], + "type": "text", + "content": "[44] Hugo Touvron, Thibaut Lavril, and et al. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971, 2023." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 315, + 107, + 554, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 107, + 554, + 139 + ], + "spans": [ + { + "bbox": [ + 315, + 107, + 554, + 139 + ], + "type": "text", + "content": "[45] Hugo Touvron, Louis Martin, and et al. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288, 2023. 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 315, + 141, + 554, + 185 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 141, + 554, + 185 + ], + "spans": [ + { + "bbox": [ + 315, + 141, + 554, + 185 + ], + "type": "text", + "content": "[46] Sheng Wang, Zihao Zhao, Xi Ouyang, Qian Wang, and Dinggang Shen. Chatcad: Interactive computer-aided diagnosis on medical image using large language models. arXiv preprint arXiv:2302.07257, 2023. 1" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 315, + 186, + 554, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 186, + 554, + 228 + ], + "spans": [ + { + "bbox": [ + 315, + 186, + 554, + 228 + ], + "type": "text", + "content": "[47] Xintong Wang, Jingheng Pan, and et al. Mitigating hallucinations in large vision-language models with instruction contrastive decoding. arXiv preprint arXiv:2403.18715, 2024.2, 7" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 315, + 230, + 554, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 230, + 554, + 274 + ], + "spans": [ + { + "bbox": [ + 315, + 230, + 554, + 274 + ], + "type": "text", + "content": "[48] Yike Wu, Yu Zhao, Shiwan Zhao, Ying Zhang, Xiaojie Yuan, Guoqing Zhao, and Ning Jiang. Overcoming language priors in visual question answering via distinguishing superficially similar instances. arXiv preprint arXiv:2209.08529, 2022. 1" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 315, + 276, + 554, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 276, + 554, + 308 + ], + "spans": [ + { + "bbox": [ + 315, + 276, + 554, + 308 + ], + "type": "text", + "content": "[49] Zhenyu Wu, Ziwei Wang, Xiuwei Xu, Jiwen Lu, and Haibin Yan. Embodied task planning with large language models. arXiv preprint arXiv:2307.01848, 2023. 
1" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 315, + 309, + 554, + 353 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 309, + 554, + 353 + ], + "spans": [ + { + "bbox": [ + 315, + 309, + 554, + 353 + ], + "type": "text", + "content": "[50] Hong Yan, Lijun Liu, Xupeng Feng, and Qingsong Huang. Overcoming language priors with self-contrastive learning for visual question answering. *Multimedia Tools and Applications*, 82(11):16343–16358, 2023. 1" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 315, + 354, + 554, + 387 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 354, + 554, + 387 + ], + "spans": [ + { + "bbox": [ + 315, + 354, + 554, + 387 + ], + "type": "text", + "content": "[51] Qinghao Ye, Haiyang Xu, and et al. mplug-owl: Modularization empowers large language models with multimodality. arXiv preprint arXiv:2304.14178, 2023. 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 315, + 388, + 554, + 421 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 388, + 554, + 421 + ], + "spans": [ + { + "bbox": [ + 315, + 388, + 554, + 421 + ], + "type": "text", + "content": "[52] Shilong Zhang, Peize Sun, and et al. Gpt4roi: Instruction tuning large language model on region-of-interest. arXiv preprint arXiv:2307.03601, 2023. 2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 315, + 422, + 554, + 477 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 422, + 554, + 477 + ], + "spans": [ + { + "bbox": [ + 315, + 422, + 554, + 477 + ], + "type": "text", + "content": "[53] Ren Zhibo, Wang Huizhen, Zhu Muhua, Wang Yichao, Xiao Tong, and Zhu Jingbo. Overcoming language priors with counterfactual inference for visual question answering. In Proceedings of the 22nd Chinese National Conference on Computational Linguistics, pages 600-610, 2023. 1" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 315, + 478, + 554, + 510 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 478, + 554, + 510 + ], + "spans": [ + { + "bbox": [ + 315, + 478, + 554, + 510 + ], + "type": "text", + "content": "[54] Kaiyang Zhou, Jingkang Yang, Chen Change Loy, and Ziwei Liu. Learning to prompt for vision-language models. IJCV, 130(9):2337-2348, 2022. 1" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 315, + 511, + 554, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 511, + 554, + 555 + ], + "spans": [ + { + "bbox": [ + 315, + 511, + 554, + 555 + ], + "type": "text", + "content": "[55] Deyao Zhu, Jun Chen, Xiaoqian Shen, Xiang Li, and Mohamed Elhoseiny. Minigpt-4: Enhancing vision-language understanding with advanced large language models. arXiv preprint arXiv:2304.10592, 2023. 
1, 2" + } + ] + } + ], + "index": 27 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 63, + 68, + 547, + 103 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 68, + 547, + 103 + ], + "spans": [ + { + "bbox": [ + 63, + 68, + 547, + 103 + ], + "type": "text", + "content": "ClearSight: Visual Signal Enhancement for Object Hallucination Mitigation in Multimodal Large Language Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 233, + 112, + 376, + 129 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 233, + 112, + 376, + 129 + ], + "spans": [ + { + "bbox": [ + 233, + 112, + 376, + 129 + ], + "type": "text", + "content": "Supplementary Material" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 141, + 239, + 155 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 141, + 239, + 155 + ], + "spans": [ + { + "bbox": [ + 55, + 141, + 239, + 155 + ], + "type": "text", + "content": "8. Additional Experimental Results" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 159, + 296, + 243 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 159, + 296, + 243 + ], + "spans": [ + { + "bbox": [ + 55, + 159, + 296, + 243 + ], + "type": "text", + "content": "Sec. 8.1 presents the additional experimental results across all tasks in the MME benchmark. Sec. 8.2 details the experimental outcomes on the three datasets within the POPE benchmark. Sec. 8.3 compares the inference speeds and memory usage of various methods on ScienceQA and Nocaps. Sec. 8.4 highlights case studies of the VAF method on the LLaVA-Bench dataset." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 313, + 141, + 524, + 155 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 141, + 524, + 155 + ], + "spans": [ + { + "bbox": [ + 313, + 141, + 524, + 155 + ], + "type": "text", + "content": "8.1. Detailed Experimental Results on MME" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 313, + 159, + 554, + 243 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 159, + 554, + 243 + ], + "spans": [ + { + "bbox": [ + 313, + 159, + 554, + 243 + ], + "type": "text", + "content": "Fig. 9 and Fig. 10 present the performance of the LLaVA model family on perception-related tasks within the MME benchmark. Models utilizing the VAF method demonstrate significantly better performance compared to those employing the VCD method. Notably, VAF achieves consistent leadership across all tasks with the LLaVA-v1.5-13B model, likely due to its ability to balance attention between" + } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 97, + 264, + 493, + 451 + ], + "blocks": [ + { + "bbox": [ + 97, + 264, + 493, + 451 + ], + "lines": [ + { + "bbox": [ + 97, + 264, + 493, + 451 + ], + "spans": [ + { + "bbox": [ + 97, + 264, + 493, + 451 + ], + "type": "image", + "image_path": "11b3663bf0024698189fe6f3112ff21dcb6c23465d60d7948074f56300a00e95.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 456, + 555, + 479 + ], + "lines": [ + { + "bbox": [ + 55, + 456, + 555, + 479 + ], + "spans": [ + { + "bbox": [ + 55, + 456, + 555, + 479 + ], + "type": "text", + "content": "Figure 9. Performance of LLaVA-v1.5-7B model on perception-related tasks in the MME Benchmark. 
VAF consistently achieved the highest scores across nearly all perception tasks." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 96, + 495, + 493, + 682 + ], + "blocks": [ + { + "bbox": [ + 96, + 495, + 493, + 682 + ], + "lines": [ + { + "bbox": [ + 96, + 495, + 493, + 682 + ], + "spans": [ + { + "bbox": [ + 96, + 495, + 493, + 682 + ], + "type": "image", + "image_path": "451a93053e14534301efd50b796019652e06c5d881b0caef998aa921869931cd.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 687, + 555, + 711 + ], + "lines": [ + { + "bbox": [ + 55, + 687, + 555, + 711 + ], + "spans": [ + { + "bbox": [ + 55, + 687, + 555, + 711 + ], + "type": "text", + "content": "Figure 10. Performance of LLaVA-v1.5-13B model on perception-related tasks in the MME Benchmark. VAF consistently achieved the highest scores across nearly all perception tasks." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 97, + 75, + 492, + 262 + ], + "blocks": [ + { + "bbox": [ + 97, + 75, + 492, + 262 + ], + "lines": [ + { + "bbox": [ + 97, + 75, + 492, + 262 + ], + "spans": [ + { + "bbox": [ + 97, + 75, + 492, + 262 + ], + "type": "image", + "image_path": "d68d5414ee93abce830c76d8bad3c0dd6824d54150d5ca53159dc02a3792d344.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 267, + 555, + 291 + ], + "lines": [ + { + "bbox": [ + 55, + 267, + 555, + 291 + ], + "spans": [ + { + "bbox": [ + 55, + 267, + 555, + 291 + ], + "type": "text", + "content": "Figure 11. Performance of the LLaVA-v1.5-7B model on cognition-related tasks in the MME Benchmark. The VAF method delivers a slight performance improvement compared to the degradation observed with the VCD method." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 96, + 305, + 493, + 494 + ], + "blocks": [ + { + "bbox": [ + 96, + 305, + 493, + 494 + ], + "lines": [ + { + "bbox": [ + 96, + 305, + 493, + 494 + ], + "spans": [ + { + "bbox": [ + 96, + 305, + 493, + 494 + ], + "type": "image", + "image_path": "2888d89da363d0b60fede2e6cffd441ac8299d0e420dbeb7c1d6e1be961f06d9.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 498, + 555, + 521 + ], + "lines": [ + { + "bbox": [ + 55, + 498, + 555, + 521 + ], + "spans": [ + { + "bbox": [ + 55, + 498, + 555, + 521 + ], + "type": "text", + "content": "Figure 12. Performance of the LLaVA-v1.5-13B model on cognition-related tasks in the MME Benchmark. The VAF method delivers a slight performance improvement compared to the degradation observed with the VCD method." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 542, + 295, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 542, + 295, + 567 + ], + "spans": [ + { + "bbox": [ + 55, + 542, + 295, + 567 + ], + "type": "text", + "content": "visual and language modalities, ensuring generated content aligns more closely with visual inputs." 
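The MME discussion above attributes VAF's gains to balancing attention between the visual and language modalities. Below is a hedged diagnostic sketch for inspecting that balance from captured attention maps; the per-layer list format and the `visual_tokens` range are assumptions about the host model (e.g., the image-token span in a LLaVA-style decoder), not something specified by the paper:

```python
import torch

def visual_attention_share(attn_maps: list, visual_tokens: slice) -> float:
    # attn_maps: one post-softmax tensor per decoder layer, each of shape
    # (heads, query_len, key_len), e.g. as returned by a Hugging Face model
    # when output_attentions=True. Returns the fraction of the newest token's
    # attention mass that lands on visual tokens, pooled over layers and heads.
    visual_mass, total_mass = 0.0, 0.0
    for layer_attn in attn_maps:
        last_query = layer_attn[:, -1, :]                    # (heads, key_len)
        visual_mass += last_query[:, visual_tokens].sum().item()
        total_mass += last_query.sum().item()
    return visual_mass / total_mass
```

A low share on hallucinated answers, relative to faithful ones, would be consistent with the over-reliance on language priors that the surrounding text describes.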
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 54, + 570, + 295, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 570, + 295, + 713 + ], + "spans": [ + { + "bbox": [ + 54, + 570, + 295, + 713 + ], + "type": "text", + "content": "Fig. 11 and Fig. 12 illustrate the performance of LLaVA model family on cognition-related tasks within the MME benchmark. The application of the VCD method significantly impaired the model's performance on these tasks, likely due to its disruptive effect on linguistic priors. In contrast, VAF method not only avoided such negative impacts but also resulted in a slight performance improvement. This improvement is attributed to VAF's ability to precisely resolve the model's tendency to overlook visual features during the critical fusion stage, facilitating better integration of visual information while preserving its effective use of linguistic information." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 313, + 541, + 526, + 555 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 541, + 526, + 555 + ], + "spans": [ + { + "bbox": [ + 313, + 541, + 526, + 555 + ], + "type": "text", + "content": "8.2. Detailed Experimental Results on POPE" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 570, + 555, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 570, + 555, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 570, + 555, + 715 + ], + "type": "text", + "content": "Tab. 6 and Tab. 9 summarize the experimental results of the LLaVA-v.15 model family on the MSCOCO, A-OKVQA, and GQA datasets within the POPE benchmark. The results highlight that our approach consistently delivers more stable and significantly improved hallucination suppression compared to the VCD method. This advantage stems from our direct enhancement of attention to visual features during the modality fusion process, enabling balanced outputs across both visual and linguistic modalities. In contrast, the VCD method relies on suppressing language priors to indirectly enhance attention to visual information. Decoding method employed in all experiments utilizes greedy search." + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 130, + 69, + 487, + 462 + ], + "blocks": [ + { + "bbox": [ + 130, + 69, + 487, + 462 + ], + "lines": [ + { + "bbox": [ + 130, + 69, + 487, + 462 + ], + "spans": [ + { + "bbox": [ + 130, + 69, + 487, + 462 + ], + "type": "table", + "html": "
<table>
<tr><th>Dataset</th><th>Category</th><th>Method</th><th>Accuracy</th><th>Precision</th><th>Recall</th><th>F1-score</th></tr>
<tr><td>MSCOCO</td><td>Random</td><td>Regular</td><td>88.2</td><td>94.2</td><td>81.5</td><td>87.4</td></tr>
<tr><td></td><td></td><td>VCD</td><td>88.5</td><td>94.4</td><td>81.8</td><td>87.6</td></tr>
<tr><td></td><td></td><td>VAF</td><td>89.8</td><td>92.9</td><td>86.2</td><td>89.4</td></tr>
<tr><td></td><td>Popular</td><td>Regular</td><td>86.1</td><td>89.9</td><td>81.5</td><td>85.5</td></tr>
<tr><td></td><td></td><td>VCD</td><td>86.3</td><td>90.0</td><td>81.7</td><td>85.8</td></tr>
<tr><td></td><td></td><td>VAF</td><td>87.5</td><td>88.6</td><td>86.2</td><td>87.4</td></tr>
<tr><td></td><td>Adversarial</td><td>Regular</td><td>82.3</td><td>82.9</td><td>81.3</td><td>82.1</td></tr>
<tr><td></td><td></td><td>VCD</td><td>82.3</td><td>82.9</td><td>81.6</td><td>82.4</td></tr>
<tr><td></td><td></td><td>VAF</td><td>83.4</td><td>86.8</td><td>78.9</td><td>82.6</td></tr>
<tr><td>A-OKVQA</td><td>Random</td><td>Regular</td><td>87.6</td><td>87.6</td><td>87.7</td><td>87.6</td></tr>
<tr><td></td><td></td><td>VCD</td><td>87.7</td><td>87.8</td><td>87.6</td><td>87.8</td></tr>
<tr><td></td><td></td><td>VAF</td><td>89.4</td><td>91.7</td><td>86.6</td><td>89.1</td></tr>
<tr><td></td><td>Popular</td><td>Regular</td><td>81.9</td><td>78.4</td><td>87.7</td><td>82.8</td></tr>
<tr><td></td><td></td><td>VCD</td><td>82.1</td><td>78.5</td><td>87.9</td><td>83.1</td></tr>
<tr><td></td><td></td><td>VAF</td><td>84.2</td><td>82.6</td><td>86.6</td><td>84.6</td></tr>
<tr><td></td><td>Adversarial</td><td>Regular</td><td>74.3</td><td>68.8</td><td>87.7</td><td>77.1</td></tr>
<tr><td></td><td></td><td>VCD</td><td>72.4</td><td>68.0</td><td>87.4</td><td>76.7</td></tr>
<tr><td></td><td></td><td>VAF</td><td>77.2</td><td>72.9</td><td>86.6</td><td>79.2</td></tr>
<tr><td>GQA</td><td>Random</td><td>Regular</td><td>88.0</td><td>87.1</td><td>89.3</td><td>88.2</td></tr>
<tr><td></td><td></td><td>VCD</td><td>88.6</td><td>87.4</td><td>89.5</td><td>88.8</td></tr>
<tr><td></td><td></td><td>VAF</td><td>89.5</td><td>90.8</td><td>88.0</td><td>89.4</td></tr>
<tr><td></td><td>Popular</td><td>Regular</td><td>79.4</td><td>74.4</td><td>89.3</td><td>81.1</td></tr>
<tr><td></td><td></td><td>VCD</td><td>79.9</td><td>74.6</td><td>89.5</td><td>81.7</td></tr>
<tr><td></td><td></td><td>VAF</td><td>81.8</td><td>78.3</td><td>88.0</td><td>82.9</td></tr>
<tr><td></td><td>Adversarial</td><td>Regular</td><td>76.3</td><td>70.6</td><td>89.3</td><td>78.9</td></tr>
<tr><td></td><td></td><td>VCD</td><td>75.2</td><td>70.2</td><td>89.9</td><td>78.3</td></tr>
<tr><td></td><td></td><td>VAF</td><td>79.7</td><td>75.4</td><td>88.0</td><td>81.2</td></tr>
</table>
", + "image_path": "733a7a722b6287fcc9fdac7f056498e59787768f80cf66a874f9f622f4187058.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 108, + 505, + 506, + 611 + ], + "blocks": [ + { + "bbox": [ + 55, + 471, + 555, + 495 + ], + "lines": [ + { + "bbox": [ + 55, + 471, + 555, + 495 + ], + "spans": [ + { + "bbox": [ + 55, + 471, + 555, + 495 + ], + "type": "text", + "content": "Table 6. Experimental results of LLaVA-1.5-7B model on POPE. VAF method achieves the most effective hallucination suppression across all three datasets. For emphasis, the highest scores in each setting are highlighted in red." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 108, + 505, + 506, + 611 + ], + "lines": [ + { + "bbox": [ + 108, + 505, + 506, + 611 + ], + "spans": [ + { + "bbox": [ + 108, + 505, + 506, + 611 + ], + "type": "table", + "html": "
<table>
<tr><th>Model</th><th>Method</th><th>Accuracy</th><th>Total Time</th><th>GPU-Memory</th><th>Latency/Example</th></tr>
<tr><td>LLaVA-v1.5-7B</td><td>Regular</td><td>88.2</td><td>5:32</td><td>14.5G</td><td>0.111s</td></tr>
<tr><td></td><td>VCD</td><td>88.5</td><td>10:31</td><td>15.7G</td><td>0.210s</td></tr>
<tr><td></td><td>VAF</td><td>89.8</td><td>5:48</td><td>14.5G</td><td>0.116s</td></tr>
<tr><td>LLaVA-v1.5-13B</td><td>Regular</td><td>88.4</td><td>8:39</td><td>26.7G</td><td>0.173s</td></tr>
<tr><td></td><td>VCD</td><td>88.6</td><td>19:38</td><td>27.8G</td><td>0.392s</td></tr>
<tr><td></td><td>VAF</td><td>90.2</td><td>8:45</td><td>26.7G</td><td>0.175s</td></tr>
</table>
", + "image_path": "703f7ad82282a916228916e145b95efd4e173a30ad5a5153fe76f0e5c7ac0718.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 620, + 555, + 643 + ], + "lines": [ + { + "bbox": [ + 55, + 620, + 555, + 643 + ], + "spans": [ + { + "bbox": [ + 55, + 620, + 555, + 643 + ], + "type": "text", + "content": "Table 7. A comparison of inference speed and GPU memory usage for different methods applied to the LLaVA-v1.5 model family on POPE benchmark. Results with the slowest inference speed and highest memory usage are highlighted in red." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 55, + 663, + 230, + 677 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 663, + 230, + 677 + ], + "spans": [ + { + "bbox": [ + 55, + 663, + 230, + 677 + ], + "type": "text", + "content": "8.3. Comparison of Inference Speeds" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 689, + 296, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 689, + 296, + 715 + ], + "spans": [ + { + "bbox": [ + 55, + 689, + 296, + 715 + ], + "type": "text", + "content": "Tab. 7 and Tab. 8 assess the impact of various methods on the LLaVA-v1.5 model family, focusing on inference speed" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 313, + 664, + 555, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 664, + 555, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 664, + 555, + 713 + ], + "type": "text", + "content": "and GPU memory usage. The results indicate that VCD significantly slows down inference, whereas our proposed method has a minimal effect. Furthermore, our method introduces no additional GPU memory requirements, in con" + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 297, + 122 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 297, + 122 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 297, + 122 + ], + "type": "text", + "content": "trast to VCD, which incurs substantial GPU memory overhead. This efficiency is achieved because our approach eliminates the need for extra processing of contrastive inputs, thereby significantly reducing computational over" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 313, + 72, + 556, + 109 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 556, + 109 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 556, + 109 + ], + "type": "text", + "content": "head. All experiments were performed on a server equipped with a single A800 80G GPU, employing greedy search as the decoding strategy." + } + ] + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 108, + 137, + 507, + 245 + ], + "blocks": [ + { + "bbox": [ + 108, + 137, + 507, + 245 + ], + "lines": [ + { + "bbox": [ + 108, + 137, + 507, + 245 + ], + "spans": [ + { + "bbox": [ + 108, + 137, + 507, + 245 + ], + "type": "table", + "html": "
<table>
<tr><th>Model</th><th>Method</th><th>Accuracy</th><th>Total Time</th><th>GPU-Memory</th><th>Latency/Example</th></tr>
<tr><td>LLaVA-v1.5-7B</td><td>Regular</td><td>68.0</td><td>0:36:39</td><td>14.5G</td><td>0.488s</td></tr>
<tr><td></td><td>VCD</td><td>64.5</td><td>1:18:47</td><td>15.7G</td><td>1.058s</td></tr>
<tr><td></td><td>VAF</td><td>68.5</td><td>0:36:41</td><td>14.5G</td><td>0.489s</td></tr>
<tr><td>LLaVA-v1.5-13B</td><td>Regular</td><td>71.6</td><td>0:45:20</td><td>26.7G</td><td>0.604s</td></tr>
<tr><td></td><td>VCD</td><td>70.0</td><td>1:46:59</td><td>27.8G</td><td>1.426s</td></tr>
<tr><td></td><td>VAF</td><td>71.7</td><td>0:48:24</td><td>26.7G</td><td>0.645s</td></tr>
</table>
", + "image_path": "7a443757c9f12c09f98302669fb4ab28d8f5b4293a012d1a4cd3b4d9d4bd0d9f.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 129, + 286, + 487, + 680 + ], + "blocks": [ + { + "bbox": [ + 55, + 253, + 555, + 277 + ], + "lines": [ + { + "bbox": [ + 55, + 253, + 555, + 277 + ], + "spans": [ + { + "bbox": [ + 55, + 253, + 555, + 277 + ], + "type": "text", + "content": "Table 8. A comparison of inference speed and GPU memory usage for different methods applied to the LLaVA-v1.5 model family on Nocaps benchmark. Results with the slowest inference speed and highest memory usage are highlighted in red." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 129, + 286, + 487, + 680 + ], + "lines": [ + { + "bbox": [ + 129, + 286, + 487, + 680 + ], + "spans": [ + { + "bbox": [ + 129, + 286, + 487, + 680 + ], + "type": "table", + "html": "
<table>
<tr><th>Dataset</th><th>Category</th><th>Method</th><th>Accuracy</th><th>Precision</th><th>Recall</th><th>F1-score</th></tr>
<tr><td>MSCOCO</td><td>Random</td><td>Regular</td><td>88.4</td><td>94.6</td><td>81.6</td><td>87.6</td></tr>
<tr><td></td><td></td><td>VCD</td><td>88.6</td><td>95.0</td><td>81.8</td><td>87.7</td></tr>
<tr><td></td><td></td><td>VAF</td><td>90.2</td><td>94.2</td><td>85.6</td><td>89.7</td></tr>
<tr><td></td><td>Popular</td><td>Regular</td><td>86.9</td><td>91.3</td><td>81.6</td><td>86.2</td></tr>
<tr><td></td><td></td><td>VCD</td><td>87.0</td><td>91.4</td><td>82.0</td><td>86.4</td></tr>
<tr><td></td><td></td><td>VAF</td><td>88.4</td><td>90.6</td><td>85.6</td><td>88.0</td></tr>
<tr><td></td><td>Adversarial</td><td>Regular</td><td>83.4</td><td>84.9</td><td>81.4</td><td>83.1</td></tr>
<tr><td></td><td></td><td>VCD</td><td>83.7</td><td>85.1</td><td>81.7</td><td>83.1</td></tr>
<tr><td></td><td></td><td>VAF</td><td>84.5</td><td>83.8</td><td>85.5</td><td>84.7</td></tr>
<tr><td>A-OKVQA</td><td>Random</td><td>Regular</td><td>88.0</td><td>88.8</td><td>87.1</td><td>87.9</td></tr>
<tr><td></td><td></td><td>VCD</td><td>88.2</td><td>89.2</td><td>87.5</td><td>87.9</td></tr>
<tr><td></td><td></td><td>VAF</td><td>89.4</td><td>91.4</td><td>86.8</td><td>89.1</td></tr>
<tr><td></td><td>Popular</td><td>Regular</td><td>83.9</td><td>81.7</td><td>87.1</td><td>84.3</td></tr>
<tr><td></td><td></td><td>VCD</td><td>84.2</td><td>81.7</td><td>87.3</td><td>84.3</td></tr>
<tr><td></td><td></td><td>VAF</td><td>86.0</td><td>85.4</td><td>86.8</td><td>86.1</td></tr>
<tr><td></td><td>Adversarial</td><td>Regular</td><td>76.0</td><td>71.0</td><td>87.1</td><td>78.2</td></tr>
<tr><td></td><td></td><td>VCD</td><td>76.4</td><td>71.2</td><td>87.1</td><td>78.3</td></tr>
<tr><td></td><td></td><td>VAF</td><td>78.2</td><td>74.1</td><td>86.8</td><td>79.9</td></tr>
<tr><td>GQA</td><td>Random</td><td>Regular</td><td>88.3</td><td>87.8</td><td>89.0</td><td>88.4</td></tr>
<tr><td></td><td></td><td>VCD</td><td>88.3</td><td>88.1</td><td>89.3</td><td>88.5</td></tr>
<tr><td></td><td></td><td>VAF</td><td>89.7</td><td>87.8</td><td>92.2</td><td>89.9</td></tr>
<tr><td></td><td>Popular</td><td>Regular</td><td>83.3</td><td>79.8</td><td>89.0</td><td>84.1</td></tr>
<tr><td></td><td></td><td>VCD</td><td>83.2</td><td>80.0</td><td>89.2</td><td>84.1</td></tr>
<tr><td></td><td></td><td>VAF</td><td>85.2</td><td>83.0</td><td>88.6</td><td>85.7</td></tr>
<tr><td></td><td>Adversarial</td><td>Regular</td><td>78.5</td><td>73.3</td><td>89.0</td><td>80.4</td></tr>
<tr><td></td><td></td><td>VCD</td><td>78.7</td><td>73.3</td><td>88.9</td><td>80.3</td></tr>
<tr><td></td><td></td><td>VAF</td><td>80.8</td><td>76.6</td><td>88.6</td><td>82.1</td></tr>
</table>
", + "image_path": "b57391157530332b83eb40b4f74b2031f218abfbe584bca644d6cd2c3c90407f.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 687, + 555, + 711 + ], + "lines": [ + { + "bbox": [ + 55, + 687, + 555, + 711 + ], + "spans": [ + { + "bbox": [ + 55, + 687, + 555, + 711 + ], + "type": "text", + "content": "Table 9. Experimental results of LLaVA-1.5-13B model on POPE. VAF method achieves the most effective hallucination suppression across all three datasets. For emphasis, the highest scores in each setting are highlighted in red." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 115, + 81, + 199, + 160 + ], + "blocks": [ + { + "bbox": [ + 115, + 81, + 199, + 160 + ], + "lines": [ + { + "bbox": [ + 115, + 81, + 199, + 160 + ], + "spans": [ + { + "bbox": [ + 115, + 81, + 199, + 160 + ], + "type": "image", + "image_path": "a65c83187bf6f743d3ed6f698d34b951a870192e87a0223814be4491c320a2c8.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 211, + 106, + 279, + 136 + ], + "lines": [ + { + "bbox": [ + 211, + 106, + 279, + 136 + ], + "spans": [ + { + "bbox": [ + 211, + 106, + 279, + 136 + ], + "type": "text", + "content": "Prompt: How many uncut fruits are in the image?" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 127, + 179, + 259, + 200 + ], + "lines": [ + { + "bbox": [ + 127, + 179, + 259, + 200 + ], + "spans": [ + { + "bbox": [ + 127, + 179, + 259, + 200 + ], + "type": "text", + "content": "Vanilla Decoding: There are five uncut fruits in the image." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 127, + 227, + 260, + 247 + ], + "lines": [ + { + "bbox": [ + 127, + 227, + 260, + 247 + ], + "spans": [ + { + "bbox": [ + 127, + 227, + 260, + 247 + ], + "type": "text", + "content": "Visual Contrastive Decoding: There are four uncut fruits in the image." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 319, + 81, + 403, + 159 + ], + "blocks": [ + { + "bbox": [ + 319, + 81, + 403, + 159 + ], + "lines": [ + { + "bbox": [ + 319, + 81, + 403, + 159 + ], + "spans": [ + { + "bbox": [ + 319, + 81, + 403, + 159 + ], + "type": "image", + "image_path": "1a904075e7cad436ffd0ce8802753c74ac8c0ab0e5717e2ca6c9dfb9b77aa3ea.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 416, + 106, + 483, + 135 + ], + "lines": [ + { + "bbox": [ + 416, + 106, + 483, + 135 + ], + "spans": [ + { + "bbox": [ + 416, + 106, + 483, + 135 + ], + "type": "text", + "content": "Prompt: How many coffee mugs are in the set?" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 332, + 180, + 465, + 200 + ], + "lines": [ + { + "bbox": [ + 332, + 180, + 465, + 200 + ], + "spans": [ + { + "bbox": [ + 332, + 180, + 465, + 200 + ], + "type": "text", + "content": "Vanilla Decoding: There are four coffee mugs in the set." 
+ } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 332, + 227, + 463, + 247 + ], + "lines": [ + { + "bbox": [ + 332, + 227, + 463, + 247 + ], + "spans": [ + { + "bbox": [ + 332, + 227, + 463, + 247 + ], + "type": "text", + "content": "Visual Contrastive Decoding: There are three coffee mugs in the set." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 332, + 274, + 463, + 294 + ], + "lines": [ + { + "bbox": [ + 332, + 274, + 463, + 294 + ], + "spans": [ + { + "bbox": [ + 332, + 274, + 463, + 294 + ], + "type": "text", + "content": "Visual Amplification Fusion: There are three coffee mugs in the set." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 115, + 365, + 228, + 472 + ], + "blocks": [ + { + "bbox": [ + 127, + 274, + 259, + 294 + ], + "lines": [ + { + "bbox": [ + 127, + 274, + 259, + 294 + ], + "spans": [ + { + "bbox": [ + 127, + 274, + 259, + 294 + ], + "type": "text", + "content": "Visual Amplification Fusion: There are three uncut fruits in the image." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 55, + 320, + 555, + 343 + ], + "lines": [ + { + "bbox": [ + 55, + 320, + 555, + 343 + ], + "spans": [ + { + "bbox": [ + 55, + 320, + 555, + 343 + ], + "type": "text", + "content": "Figure 13. An illustration of VAF correcting hallucinations on LLaVA-Bench, with a focus on numerical perception tasks. Correct outputs are highlighted in green, while incorrect ones are marked in red." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 115, + 365, + 228, + 472 + ], + "lines": [ + { + "bbox": [ + 115, + 365, + 228, + 472 + ], + "spans": [ + { + "bbox": [ + 115, + 365, + 228, + 472 + ], + "type": "image", + "image_path": "d96465a44578b92b95d86a245007c48c89608d96e2b641f2c2af97ae5787d3c4.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 666, + 555, + 689 + ], + "lines": [ + { + "bbox": [ + 55, + 666, + 555, + 689 + ], + "spans": [ + { + "bbox": [ + 55, + 666, + 555, + 689 + ], + "type": "text", + "content": "Figure 14. An illustration of VAF correcting hallucinations on LLaVA-Bench, with a focus on complex reasoning tasks. Correct outputs are highlighted in green, while incorrect ones are marked in red." + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "bbox": [ + 253, + 370, + 386, + 380 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 253, + 370, + 386, + 380 + ], + "spans": [ + { + "bbox": [ + 253, + 370, + 386, + 380 + ], + "type": "text", + "content": "Prompt: What is unusual about this image?" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 257, + 399, + 470, + 469 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 257, + 399, + 470, + 469 + ], + "spans": [ + { + "bbox": [ + 257, + 399, + 470, + 469 + ], + "type": "text", + "content": "Vanilla Decoding: The unusual aspect of this image is that a man is ironing clothes while standing on the back of a moving yellow taxi. This is not a typical scenario, as ironing clothes is usually done indoors, in a more controlled environment. The man's actions pose a risk to both himself and the passengers of the taxi, as he could lose his balance and fall, causing injury or damage to the vehicle. 
Additionally, the presence of a washing machine in the back of the taxi is also unconventional, as it is not a common feature in taxis." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 257, + 487, + 470, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 257, + 487, + 470, + 567 + ], + "spans": [ + { + "bbox": [ + 257, + 487, + 470, + 567 + ], + "type": "text", + "content": "Visual Contrastive Decoding: An unconventional aspect of the image is the presence of a man standing on the back of a yellow car while holding multiple clothes, seemingly ironing them. Ironing is typically done in a more controlled and stationary environment, such as a home or an ironing board in an indoor setting. However, in this case, the man is in a busy city driving a yellow taxi, which defies the traditional approach to ironing and creates an unusual scene. It may draw the attention of onlookers and passersby due to the uncommon behavior, making this image memorable." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 257, + 586, + 470, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 257, + 586, + 470, + 639 + ], + "spans": [ + { + "bbox": [ + 257, + 586, + 470, + 639 + ], + "type": "text", + "content": "Visual Amplification Fusion: The unusual aspect of this image is that a man is ironing clothes while sitting on the back of a moving yellow taxi. This is not a typical scenario, as ironing clothes is generally done in a more controlled environment, such as a home or a laundromat. The man's position on the back of the taxi, combined with the act of ironing, creates an unconventional and unexpected scene." + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 138, + 79, + 240, + 175 + ], + "blocks": [ + { + "bbox": [ + 138, + 79, + 240, + 175 + ], + "lines": [ + { + "bbox": [ + 138, + 79, + 240, + 175 + ], + "spans": [ + { + "bbox": [ + 138, + 79, + 240, + 175 + ], + "type": "image", + "image_path": "5e53b49c44c6e0501a0e4aa3ecd37b74a84fb37345c7e5c3fb0250cb93d4c76f.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 259, + 85, + 361, + 94 + ], + "lines": [ + { + "bbox": [ + 259, + 85, + 361, + 94 + ], + "spans": [ + { + "bbox": [ + 259, + 85, + 361, + 94 + ], + "type": "text", + "content": "Prompt: Describe this photo in detail." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 262, + 112, + 452, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 262, + 112, + 452, + 175 + ], + "spans": [ + { + "bbox": [ + 262, + 112, + 452, + 175 + ], + "type": "text", + "content": "Vanilla Decoding: The image features a painting of a dog wearing a Renaissance-style dress, giving it a human-like appearance. The dog is sitting in a chair, looking directly at the viewer. The painting is quite detailed, capturing the dog's facial expression and the intricate design of the dress. In the background, there is a person partially visible on the left side of the painting. The overall scene is quite captivating and unique, as it combines the artistic style of the Renaissance with the subject matter of a dog." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 263, + 193, + 452, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 263, + 193, + 452, + 232 + ], + "spans": [ + { + "bbox": [ + 263, + 193, + 452, + 232 + ], + "type": "text", + "content": "Visual Contrastive Decoding: The image features a unique painting on a canvas, depicting a portrait of a bulldog in Renaissance style, giving it a Renaissance era appearance. In this painting, the bulldog is presented as a woman, complete with a veil on its head, looking directly to its right." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 263, + 232, + 452, + 279 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 263, + 232, + 452, + 279 + ], + "spans": [ + { + "bbox": [ + 263, + 232, + 452, + 279 + ], + "type": "text", + "content": "The painting also includes several elements of landscape and architecture, giving a sense of a real setting. Apart from the central dog figure, there are three smaller figures or figures in the background, adding more depth and life to the painting. In the forefront, a chair is visible, positioned to the left of the dog, further enhancing the painting's depiction of a realistic scene." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 262, + 297, + 451, + 328 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 262, + 297, + 451, + 328 + ], + "spans": [ + { + "bbox": [ + 262, + 297, + 451, + 328 + ], + "type": "text", + "content": "Visual Amplification Fusion: The image features a painting of a dog wearing a Renaissance-style dress, giving it a human-like appearance. The dog is positioned in the center of the painting, and appears to be staring at the viewer." + } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 138, + 393, + 239, + 487 + ], + "blocks": [ + { + "bbox": [ + 55, + 349, + 555, + 373 + ], + "lines": [ + { + "bbox": [ + 55, + 349, + 555, + 373 + ], + "spans": [ + { + "bbox": [ + 55, + 349, + 555, + 373 + ], + "type": "text", + "content": "Figure 15. An illustration of VAF correcting hallucinations on LLaVA-Bench, with a focus on image description tasks. Correct outputs are highlighted in green, while incorrect ones are marked in red." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 138, + 393, + 239, + 487 + ], + "lines": [ + { + "bbox": [ + 138, + 393, + 239, + 487 + ], + "spans": [ + { + "bbox": [ + 138, + 393, + 239, + 487 + ], + "type": "image", + "image_path": "88514ab10ec1b0add4b169678541b00295fc37deda1c2ba83f677b1b7cc5202d.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 259, + 397, + 361, + 406 + ], + "lines": [ + { + "bbox": [ + 259, + 397, + 361, + 406 + ], + "spans": [ + { + "bbox": [ + 259, + 397, + 361, + 406 + ], + "type": "text", + "content": "Prompt: Describe this photo in detail." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 55, + 672, + 555, + 696 + ], + "lines": [ + { + "bbox": [ + 55, + 672, + 555, + 696 + ], + "spans": [ + { + "bbox": [ + 55, + 672, + 555, + 696 + ], + "type": "text", + "content": "Figure 16. Additional example of VAF correcting model hallucinations on LLaVA-Bench, primarily focusing on image description tasks. Correct outputs are highlighted in green, while incorrect ones are marked in red." 
+ } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 263, + 425, + 453, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 263, + 425, + 453, + 495 + ], + "spans": [ + { + "bbox": [ + 263, + 425, + 453, + 495 + ], + "type": "text", + "content": "Vanilla Decoding: The image features a beautiful display of three ripe and juicy passion fruit, cut in half to reveal their exquisite white meat inside. The ripe fruit is placed in a wooden bowl, making them the focal point of the image. Each fruit half rests in a different location on the table, showcasing the various sizes of these unique fruit-like berries. The close-up view of the cut open fruits and the table setting creates an appetizing scene, highlighting the natural color of the passion fruit and its appeal to anyone who enjoys a fresh and tangy experience with a hint of exotic taste." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 264, + 512, + 453, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 264, + 512, + 453, + 590 + ], + "spans": [ + { + "bbox": [ + 264, + 512, + 453, + 590 + ], + "type": "text", + "content": "Visual Contrastive Decoding: The image features a wooden table with a basket of fruit, including a variety of fruits such as apples, oranges, and bananas. There are three apples on the table, with one placed near the left side, another in the middle, and the third one on the right side. In addition to the apples, there are two oranges on the table, one located in the middle and the other towards the right side. A banana can be seen on the left side of the table, and a cluster of grapes is also present, with some grapes placed near the center and others on the right side of the table. The assortment of fruits creates a colorful and appetizing display." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 265, + 605, + 453, + 652 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 265, + 605, + 453, + 652 + ], + "spans": [ + { + "bbox": [ + 265, + 605, + 453, + 652 + ], + "type": "text", + "content": "Visual Amplification Fusion: The image features a wooden table with a basket of four purple fruits, possibly eggplants or figs, placed on it. The fruits are cut open, revealing their inwards. The fruits are arranged in a way that they are sitting on top of each other, with one fruit on the left side, another in the middle, and the third on the right side of the table. The fourth fruit is placed at the edge of the table." 
+ } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 59, + 72, + 289, + 245 + ], + "blocks": [ + { + "bbox": [ + 59, + 72, + 289, + 245 + ], + "lines": [ + { + "bbox": [ + 59, + 72, + 289, + 245 + ], + "spans": [ + { + "bbox": [ + 59, + 72, + 289, + 245 + ], + "type": "image", + "image_path": "186b4777820d03711bd9a905a5233c8f91c9c5ca8d2c8ed43bec3792005c511b.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 140, + 251, + 208, + 262 + ], + "lines": [ + { + "bbox": [ + 140, + 251, + 208, + 262 + ], + "spans": [ + { + "bbox": [ + 140, + 251, + 208, + 262 + ], + "type": "text", + "content": "(a) Accuracy Metric" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 323, + 72, + 549, + 245 + ], + "blocks": [ + { + "bbox": [ + 323, + 72, + 549, + 245 + ], + "lines": [ + { + "bbox": [ + 323, + 72, + 549, + 245 + ], + "spans": [ + { + "bbox": [ + 323, + 72, + 549, + 245 + ], + "type": "image", + "image_path": "792b6994a955d9a584b4ffe2d499b18844b6c1008aa9d461b3041cfa70c08cb0.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 403, + 251, + 470, + 262 + ], + "lines": [ + { + "bbox": [ + 403, + 251, + 470, + 262 + ], + "spans": [ + { + "bbox": [ + 403, + 251, + 470, + 262 + ], + "type": "text", + "content": "(b) F1-Score Metirc" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 325, + 214, + 338 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 325, + 214, + 338 + ], + "spans": [ + { + "bbox": [ + 55, + 325, + 214, + 338 + ], + "type": "text", + "content": "8.4. Case study on LLaVA-Bench" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 342, + 296, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 342, + 296, + 426 + ], + "spans": [ + { + "bbox": [ + 55, + 342, + 296, + 426 + ], + "type": "text", + "content": "Fig. 13, Fig. 14, Fig. 15, and Fig. 16 illustrate the effectiveness of various methods in mitigating model hallucinations on LLaVA-Bench. Across tasks such as numerical perception, image description, and complex reasoning, our approach demonstrates consistently superior performance in suppressing hallucinations. Experiments are conducted using LLaVA-v1.5-7B model." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 438, + 214, + 451 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 438, + 214, + 451 + ], + "spans": [ + { + "bbox": [ + 55, + 438, + 214, + 451 + ], + "type": "text", + "content": "9. Additional Ablation Studies" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 459, + 296, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 459, + 296, + 544 + ], + "spans": [ + { + "bbox": [ + 55, + 459, + 296, + 544 + ], + "type": "text", + "content": "In Sec. 9.1, we examine how enhancing attention to visual features at different levels affects hallucination suppression. In Sec. 9.2, we analyze the influence of varying the suppression coefficient " + }, + { + "bbox": [ + 55, + 459, + 296, + 544 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 55, + 459, + 296, + 544 + ], + "type": "text", + "content": " on mitigating hallucinations. Finally, in Sec. 
9.3, we evaluate the performance of the VAF method in suppressing hallucinations under various sampling strategies." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 55, + 552, + 277, + 565 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 552, + 277, + 565 + ], + "spans": [ + { + "bbox": [ + 55, + 552, + 277, + 565 + ], + "type": "text", + "content": "9.1. Effect of Enhancement at Different Layers" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 55, + 570, + 296, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 570, + 296, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 570, + 296, + 713 + ], + "type": "text", + "content": "We enhanced attention to visual features in layers 0-5, 10-15, and 20-25. Fig. 17 demonstrates the impact of enhancing visual attention at different layers. Notably, enhancing attention in the middle layers significantly reduces hallucination, while modifications in the shallow and deep layers have minimal effect on the generation results. As discussed in Sec. 4.1, this is because the model primarily integrates modality information in the middle layers. Thus, enhancing the focus on visual features during this phase is crucial for effectively mitigating hallucination. Experiments are conducted using the LLaVA-v1.5-7B model on the COCO-Random dataset from the POPE Benchmark." + } + ] + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 332, + 327, + 538, + 485 + ], + "blocks": [ + { + "bbox": [ + 55, + 270, + 555, + 304 + ], + "lines": [ + { + "bbox": [ + 55, + 270, + 555, + 304 + ], + "spans": [ + { + "bbox": [ + 55, + 270, + 555, + 304 + ], + "type": "text", + "content": "Figure 17. The Effect of Enhancing Visual Attention at Different Layers on Prediction Accuracy. This experiment, conducted with the LLaVA-v1.5-7B model on the COCO-Random dataset within the POPE Benchmark, demonstrates that enhancing attention to visual features in the model's middle layers significantly reduces hallucinations." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 332, + 327, + 538, + 485 + ], + "lines": [ + { + "bbox": [ + 332, + 327, + 538, + 485 + ], + "spans": [ + { + "bbox": [ + 332, + 327, + 538, + 485 + ], + "type": "image", + "image_path": "ffcc6e9e895eff01f780f8bd05da7688628711fa34ba7e6cfa9ed07eea6550d6.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 496, + 555, + 541 + ], + "lines": [ + { + "bbox": [ + 313, + 496, + 555, + 541 + ], + "spans": [ + { + "bbox": [ + 313, + 496, + 555, + 541 + ], + "type": "text", + "content": "Figure 18. The effect of the suppression coefficient " + }, + { + "bbox": [ + 313, + 496, + 555, + 541 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 313, + 496, + 555, + 541 + ], + "type": "text", + "content": " on the VAF method's ability to mitigate model hallucinations. The experiments were performed using the LLaVA-v1.5-7B model on the COCO-Random dataset from the POPE Benchmark." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 563, + 491, + 576 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 563, + 491, + 576 + ], + "spans": [ + { + "bbox": [ + 313, + 563, + 491, + 576 + ], + "type": "text", + "content": "9.2. 
Effect of Suppression Coefficient" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 582, + 555, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 582, + 555, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 582, + 555, + 715 + ], + "type": "text", + "content": "We assessed the effect of the suppression coefficient " + }, + { + "bbox": [ + 313, + 582, + 555, + 715 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 313, + 582, + 555, + 715 + ], + "type": "text", + "content": " on the performance of the VAF method using the LLaVA-v1.5-7B model on the COCO-Random dataset within the POPE Benchmark. In our experiments, " + }, + { + "bbox": [ + 313, + 582, + 555, + 715 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 313, + 582, + 555, + 715 + ], + "type": "text", + "content": " was fixed at 0.15, while " + }, + { + "bbox": [ + 313, + 582, + 555, + 715 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 313, + 582, + 555, + 715 + ], + "type": "text", + "content": " was systematically adjusted. The results, presented in Fig. 18, reveal that when " + }, + { + "bbox": [ + 313, + 582, + 555, + 715 + ], + "type": "inline_equation", + "content": "0 < \\beta < 0.15" + }, + { + "bbox": [ + 313, + 582, + 555, + 715 + ], + "type": "text", + "content": ", VAF significantly enhances the model's ability to suppress hallucinations. This improvement is likely due to VAF reducing redundant attention to system prompts in this range, thereby reinforcing focus on visual features and enabling generated content to better align with the visual input. Conversely," + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 143, + 70, + 473, + 270 + ], + "blocks": [ + { + "bbox": [ + 143, + 70, + 473, + 270 + ], + "lines": [ + { + "bbox": [ + 143, + 70, + 473, + 270 + ], + "spans": [ + { + "bbox": [ + 143, + 70, + 473, + 270 + ], + "type": "table", + "html": "
<table><tr><td>Sampling Strategy</td><td>Method</td><td>Accuracy</td><td>Precision</td><td>Recall</td><td>F1-Score</td></tr>
<tr><td rowspan="2">Greedy</td><td>Regular</td><td>88.2</td><td>94.4</td><td>81.4</td><td>87.4</td></tr>
<tr><td>VAF</td><td>89.8</td><td>92.9</td><td>86.2</td><td>89.4</td></tr>
<tr><td rowspan="2">Direct Sampling</td><td>Regular</td><td>82.9</td><td>90.4</td><td>71.3</td><td>80.9</td></tr>
<tr><td>VAF</td><td>83.9</td><td>90.6</td><td>80.9</td><td>85</td></tr>
<tr><td rowspan="2">Top P</td><td>Regular</td><td>84.3</td><td>92.1</td><td>72.5</td><td>82.1</td></tr>
<tr><td>VAF</td><td>85.7</td><td>89.6</td><td>82.4</td><td>85.9</td></tr>
<tr><td rowspan="2">Top K</td><td>Regular</td><td>83.3</td><td>91.9</td><td>72.8</td><td>81.1</td></tr>
<tr><td>VAF</td><td>85</td><td>88.3</td><td>81.9</td><td>84.9</td></tr>
<tr><td rowspan="2">Top K + Temp0.5</td><td>Regular</td><td>85.5</td><td>95.1</td><td>74.9</td><td>84.5</td></tr>
<tr><td>VAF</td><td>86.7</td><td>91.2</td><td>83.4</td><td>87</td></tr>
<tr><td rowspan="2">Top K + Temp1.5</td><td>Regular</td><td>80.4</td><td>87.1</td><td>70.2</td><td>77.8</td></tr>
<tr><td>VAF</td><td>82.1</td><td>86</td><td>78.2</td><td>81.9</td></tr>
</table>
", + "image_path": "0b87569274e178e6f9394d30e1099b4c2100d12bbd967c3a449cc44082e27c39.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 278, + 555, + 312 + ], + "lines": [ + { + "bbox": [ + 55, + 278, + 555, + 312 + ], + "spans": [ + { + "bbox": [ + 55, + 278, + 555, + 312 + ], + "type": "text", + "content": "Table 10. Effectiveness of the VAF method in mitigating model hallucination under different sampling strategies. The highest score in each setting is highlighted in red. Experiments were conducted using the LLaVA-v1.5-7B model on the COCO-Random dataset within the POPE Benchmark." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 55, + 332, + 296, + 405 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 332, + 296, + 405 + ], + "spans": [ + { + "bbox": [ + 55, + 332, + 296, + 405 + ], + "type": "text", + "content": "when " + }, + { + "bbox": [ + 55, + 332, + 296, + 405 + ], + "type": "inline_equation", + "content": "\\beta > 0.15" + }, + { + "bbox": [ + 55, + 332, + 296, + 405 + ], + "type": "text", + "content": ", the model's performance deteriorated. We hypothesize that this decline stems from excessive suppression of attention to system prompts, which disrupts the delicate balance required for effectively integrating multimodal information, ultimately leading to a degradation in overall performance." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 414, + 260, + 428 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 414, + 260, + 428 + ], + "spans": [ + { + "bbox": [ + 55, + 414, + 260, + 428 + ], + "type": "text", + "content": "9.3. Effect of Different Sampling Strategies" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 433, + 296, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 433, + 296, + 517 + ], + "spans": [ + { + "bbox": [ + 55, + 433, + 296, + 517 + ], + "type": "text", + "content": "We evaluated the effectiveness of the VAF method in mitigating model hallucination under different sampling strategies using the LLaVA-v1.5-7B model on the COCO-Random dataset from the POPE Benchmark. The experimental results, shown in Tab. 10, indicate that the VAF method significantly mitigates model hallucination across all sampling strategies." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 529, + 220, + 544 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 529, + 220, + 544 + ], + "spans": [ + { + "bbox": [ + 56, + 529, + 220, + 544 + ], + "type": "text", + "content": "10. Prompts for Different Tasks" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 550, + 296, + 588 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 550, + 296, + 588 + ], + "spans": [ + { + "bbox": [ + 55, + 550, + 296, + 588 + ], + "type": "text", + "content": "POPE Dataset. In the POPE dataset, input template for the model is presented below, with the prompts highlighted in green and the image highlighted in red." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 608, + 280, + 646 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 608, + 280, + 646 + ], + "spans": [ + { + "bbox": [ + 70, + 608, + 280, + 646 + ], + "type": "text", + "content": "A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 71, + 650, + 144, + 661 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 650, + 144, + 661 + ], + "spans": [ + { + "bbox": [ + 71, + 650, + 144, + 661 + ], + "type": "text", + "content": "USER: IMAGE" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 662, + 279, + 686 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 662, + 279, + 686 + ], + "spans": [ + { + "bbox": [ + 107, + 662, + 279, + 686 + ], + "type": "text", + "content": "Is there a cow in the image? Please just answer yes or no." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 72, + 691, + 132, + 702 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 691, + 132, + 702 + ], + "spans": [ + { + "bbox": [ + 72, + 691, + 132, + 702 + ], + "type": "text", + "content": "ASSISTANT:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 332, + 555, + 369 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 332, + 555, + 369 + ], + "spans": [ + { + "bbox": [ + 313, + 332, + 555, + 369 + ], + "type": "text", + "content": "Nocaps Datasets. In Nocaps and Flickr30k dataset, input template for the model is presented below, with prompts highlighted in green and image highlighted in red." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 329, + 388, + 539, + 425 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 329, + 388, + 539, + 425 + ], + "spans": [ + { + "bbox": [ + 329, + 388, + 539, + 425 + ], + "type": "text", + "content": "A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 330, + 430, + 403, + 441 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 330, + 430, + 403, + 441 + ], + "spans": [ + { + "bbox": [ + 330, + 430, + 403, + 441 + ], + "type": "text", + "content": "USER: IMAGE" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 365, + 442, + 538, + 466 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 365, + 442, + 538, + 466 + ], + "spans": [ + { + "bbox": [ + 365, + 442, + 538, + 466 + ], + "type": "text", + "content": "Provide a one-sentence caption for the provided image." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 331, + 472, + 390, + 483 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 472, + 390, + 483 + ], + "spans": [ + { + "bbox": [ + 331, + 472, + 390, + 483 + ], + "type": "text", + "content": "ASSISTANT:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 504, + 555, + 541 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 504, + 555, + 541 + ], + "spans": [ + { + "bbox": [ + 313, + 504, + 555, + 541 + ], + "type": "text", + "content": "Sci-VQA Dataset. In the Sci-VQA dataset, input template for the model is presented below, with the prompts highlighted in green and the image highlighted in red." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 329, + 560, + 539, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 329, + 560, + 539, + 597 + ], + "spans": [ + { + "bbox": [ + 329, + 560, + 539, + 597 + ], + "type": "text", + "content": "A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 331, + 602, + 403, + 613 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 602, + 403, + 613 + ], + "spans": [ + { + "bbox": [ + 331, + 602, + 403, + 613 + ], + "type": "text", + "content": "USER: IMAGE" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 365, + 614, + 503, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 365, + 614, + 503, + 624 + ], + "spans": [ + { + "bbox": [ + 365, + 614, + 503, + 624 + ], + "type": "text", + "content": "Context: Select the best answer." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 365, + 626, + 538, + 648 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 365, + 626, + 538, + 648 + ], + "spans": [ + { + "bbox": [ + 365, + 626, + 538, + 648 + ], + "type": "text", + "content": "Which property do these three objects have in common?" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 365, + 650, + 491, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 365, + 650, + 491, + 662 + ], + "spans": [ + { + "bbox": [ + 365, + 650, + 491, + 662 + ], + "type": "text", + "content": "A. shiny B. slippery C. opaque" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 365, + 662, + 538, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 365, + 662, + 538, + 685 + ], + "spans": [ + { + "bbox": [ + 365, + 662, + 538, + 685 + ], + "type": "text", + "content": "Answer with the option's letter from the given choices directly." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 331, + 691, + 390, + 702 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 691, + 390, + 702 + ], + "spans": [ + { + "bbox": [ + 331, + 691, + 390, + 702 + ], + "type": "text", + "content": "ASSISTANT:" + } + ] + } + ], + "index": 23 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2503_13xxx/2503.13139/d822d65e-325f-4d59-9422-a1e3da2df18a_content_list.json b/data/2025/2503_13xxx/2503.13139/d822d65e-325f-4d59-9422-a1e3da2df18a_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..f9b6afed0c070edd3d729b07909b6d779793cbd2 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13139/d822d65e-325f-4d59-9422-a1e3da2df18a_content_list.json @@ -0,0 +1,5368 @@ +[ + { + "type": "text", + "text": "Logic-in-Frames: Dynamic Keyframe Search via Visual Semantic-Logical Verification for Long Video Understanding", + "text_level": 1, + "bbox": [ + 225, + 122, + 776, + 200 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Weiyu Guo Ziyang Chen Shaoguang Wang Jianxiang He Yijie Xu AI Thrust, HKUST(GZ)", + "bbox": [ + 235, + 237, + 764, + 265 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{wguo395, zchen483, swang440, jhe307, yxu409}@connect.hkust-gz.edu.cn", + "bbox": [ + 230, + 267, + 764, + 281 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Jinhui Ye", + "bbox": [ + 344, + 282, + 413, + 296 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Shanghai AI Laboratory", + "bbox": [ + 297, + 297, + 459, + 311 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "jinhuiyes@gmail.com", + "bbox": [ + 305, + 313, + 454, + 325 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Ying Sun* Hui Xiong*", + "bbox": [ + 519, + 282, + 689, + 297 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": 
"AI Thrust, HKUST(GZ)", + "bbox": [ + 524, + 297, + 686, + 310 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{yings, xionghui}@ust.hk", + "bbox": [ + 513, + 311, + 699, + 325 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 459, + 335, + 537, + 351 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Understanding long video content is a complex endeavor that often relies on densely sampled frame captions or end-to-end feature selectors, yet these techniques commonly overlook the logical relationships between textual queries and visual elements. In practice, computational constraints necessitate coarse frame subsampling, a challenge analogous to \"finding a needle in a haystack.\" To address this issue, we introduce a semantics-driven search framework that reformulates keyframe selection under the paradigm of Visual Semantic-Logical Search. Specifically, we systematically define four fundamental logical dependencies: 1) spatial co-occurrence, 2) temporal proximity, 3) attribute dependency, and 4) causal order. These relations dynamically update frame sampling distributions through an iterative refinement process, enabling context-aware identification of semantically critical frames tailored to specific query requirements. Our method establishes new SOTA performance on the manually annotated benchmark in key-frame selection metrics. Furthermore, when applied to downstream video question-answering tasks, the proposed approach demonstrates the best performance gains over existing methods on LONGVIDEOBENCH and VIDEO-MME, validating its effectiveness in bridging the logical gap between textual queries and visual-temporal reasoning. The code will be publicly available.", + "bbox": [ + 228, + 359, + 769, + 609 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 171, + 618, + 313, + 633 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Vision-Language Models (VLMs) Yin et al. (2024) have achieved remarkable progress in video understanding Zou et al. (2024); Tang et al. (2023), particularly in video question answering Wang et al. (2024c); Zhang et al. (2023), demonstrating potential for modeling real-world scenarios. However, existing methods can only simultaneously process a limited number of frames due to the inherent token limit and extremely high dimension of spatio-temporal video data, especially for long videos. Furthermore, uniformly sampled keyframes are query-agnostic and insufficient to represent query-related contents. To tackle these challenges, this paper addresses a pivotal research question:", + "bbox": [ + 169, + 642, + 826, + 742 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "How can we efficiently and accurately select keyframes that are semantically critical for answering video-based queries?", + "bbox": [ + 228, + 746, + 767, + 773 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "We hypothesize that deconstructing visual semantic and logical cues (e.g., target objects, logical relations including temporal, spatial, attribute, and causal relationships between visual entities) from textual queries enables effective identification of task-relevant frames through heuristic sampling and search. 
Building on this insight, we propose Visual Semantic-Logical Search (VSLS), a novel keyframe search framework that incorporates target object confidence estimation and joint verification of visual semantic logic into the iterative update of frame sampling distribution and selects the most informative frames with the highest confidence. Experimental results show that our", + "bbox": [ + 169, + 780, + 826, + 878 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Corresponding authors.", + "bbox": [ + 191, + 886, + 339, + 900 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2503.13139v2 [cs.CV] 17 May 2025", + "bbox": [ + 22, + 255, + 60, + 708 + ], + "page_idx": 0 + }, + { + "type": "footer", + "text": "Preprint.", + "bbox": [ + 171, + 922, + 227, + 936 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Q: In the video, what color pen did the author use when he wrote", + "bbox": [ + 194, + 101, + 477, + 111 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "```\n\\\"guitar\\\" for the second time?", + "bbox": [ + 184, + 111, + 405, + 119 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/953c9d83f6870c26dad9968e0c39a6e9242291e0ea0bf567b0874126d0a19571.jpg", + "image_caption": [ + "Temporal", + "A) Brown", + "Spatial" + ], + "image_footnote": [], + "bbox": [ + 176, + 128, + 331, + 185 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/ac70dfed2f93f2e57ec2e765bb99ce5996cc84efe94f743f0640b0f5b681d414.jpg", + "image_caption": [ + "(text, time, pen)", + "B) Pink", + "(copilot, spatial, Egyptian Pyramids)" + ], + "image_footnote": [], + "bbox": [ + 344, + 128, + 496, + 185 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Q:At the end of the animation, which building does the airplane fly over?", + "bbox": [ + 176, + 199, + 495, + 209 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "A) The Eiffel Tower.", + "bbox": [ + 184, + 209, + 274, + 217 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "B) The Egyptian Pyramids", + "bbox": [ + 349, + 209, + 457, + 217 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/2cbbe9459cd07a11b8b6cf63240b6eb53b964cbf2cfd943bf25bbe734517e78c.jpg", + "image_caption": [ + "RED—Baseline Answer" + ], + "image_footnote": [], + "bbox": [ + 176, + 223, + 333, + 333 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/38acc18505c76dd154f63dcc30a60695263952fd5e1c996c5dbaf6981479fc57.jpg", + "image_caption": [ + "Our Answer", + "Figure 1: Examples of four types of visual semantic-logical relationships in video QA detected by our VSLS framework: Temporal (text, time, pen), Attribute (man, attribute, white shirt), Spatial (copilot, spatial, Egyptian Pyramids), and Causal (man, causal, basketball). Green boxes indicate correct answers, while red boxes show baseline errors." + ], + "image_footnote": [], + "bbox": [ + 341, + 223, + 498, + 334 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Q: In a room with a wall tiger and a map on the wall, there is a man wearing a white shirt. 
What is he doing?", + "bbox": [ + 519, + 99, + 803, + 118 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "A) gazing at a circuit board", + "bbox": [ + 508, + 119, + 625, + 127 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/ea37ec53eb15b2a0ff0e8bdea1cc5566bbdc337da0a7d13b1c404d2c4da49c1e.jpg", + "image_caption": [ + "Attribute", + "Causal" + ], + "image_footnote": [], + "bbox": [ + 503, + 128, + 656, + 185 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/c2bb1f52e9e19170d0c025e96d90330f57871bc152ba1bbb27307286e8aafd7e.jpg", + "image_caption": [ + "(man, attribute, white shirt)", + "B) speaking", + "(man, causal, basketball)" + ], + "image_footnote": [], + "bbox": [ + 658, + 128, + 818, + 186 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Q:After a man wearing a red short-sleeved shirt and a black hat finished", + "bbox": [ + 504, + 196, + 818, + 205 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "speaking in front of a black background, what did this me", + "bbox": [ + 517, + 205, + 769, + 213 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "A) picked up a mobile phone.", + "bbox": [ + 506, + 213, + 630, + 222 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/94231a5d2a0704895637946eabe85545b336071375abcfbbf71d4801fdb3ce38.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 503, + 223, + 658, + 334 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/c61ac3848f91015fed47389a4b36e33e2e9c4471568eacab710f50bb62574e37.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 661, + 223, + 818, + 334 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "approach requires only sparse sampling (1.4% of frames per video on average) to identify critical frames, significantly reducing computational complexity compared to conventional dense sampling strategies while maintaining performance on downstream video understanding tasks.", + "bbox": [ + 169, + 424, + 823, + 467 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Compared to conventional methods, VSLS shows three distinct advantages. First, the framework is training-free and highly efficient in comparison with dense captioning Chen et al. (2024c); Kim et al. (2024); Wang et al. (2024b) or video clustering Wang et al. (2024e); Rajan and Parameswaran (2025) strategies, sampling only $1.4\\%$ of frames on average in LVHAYSTACK. Second, it explicitly models logical binary relations (namely spatial, temporal, attribute, and causal) in the query beyond simple target detection Ye et al. (2025b), utilizing additional visual semantic features and enhancing logical consistency throughout the reasoning process. Third, VSLS is a plug-and-play module, which can be seamlessly integrated into existing VLM pipelines without cross-component dependencies.", + "bbox": [ + 169, + 472, + 826, + 584 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We further examine VSLS on several public datasets, including LONGVIDEOBENCH Ye et al. (2025a), a comprehensive benchmark for long video understanding; VIDEO-MME Fu et al. (2024), a widely adopted multimodal video question answering dataset; and HAYSTACK-LVBENCH Ye et al. (2025a) with meticulously annotated keyframes based on human feedback for more precise analysis. 
Extensive experiments demonstrate significant improvements in both the semantic similarity and temporal coverage between the retrieved keyframes and the ground truth labels, as well as the accuracy in downstream video question-answering tasks. More importantly, with only $1.4\\%$ of video frames (EGO4D Grauman et al. (2022)) sampled in the search iteration, our method achieves an $8.7\\%$ improvement in GPT-4o Hurst et al. (2024)'s long video QA accuracy. This performance gain is attributed to our simple yet powerful observation: query-guided visual semantic logic retrieval can mitigate the gap between potential visual logic in video frames and the logic expressed in the query. To be specific, constructing ternary logic triplets with visual elements (e.g., object1, logic type, object2) can enhance downstream reasoning capabilities when performing textual-visual retrieval.", + "bbox": [ + 169, + 590, + 826, + 771 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To the best of our knowledge, we are arguably the first to search for keyframes in long videos by detecting visual semantic logic, with potential extensions to other textual-visual retrieval tasks. Our main contributions are as follows:", + "bbox": [ + 169, + 776, + 823, + 816 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We define four fundamental types of semantic logic relations in video QA tasks, including temporal, causal, attribute, and spatial relations, which can be accurately detected across various datasets.", + "- We sample only $1.4\\%$ of frames on average during keyframe search through heuristic sampling and distribution updating by different visual semantics and logical relations.", + "- We comprehensively evaluate retrieval efficiency, semantic similarity, temporal coverage, and video question answering accuracy across several widely used video understanding datasets, demonstrating significant improvements in downstream tasks." + ], + "bbox": [ + 173, + 823, + 825, + 922 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/b122957dd055813357d936dff056404604c37dce8079ebfdf60a5643e31f325f.jpg", + "image_caption": [ + "Figure 2: Our VSLS Framework for Efficient Keyframe Selection. VSLS sparsely samples frames and selects key ones via object detection and logic verification. Steps: 1) Use LLM & VLM to extract cue/target objects and four logic types (spatial, temporal, attribute, causal); 2) Adaptive sampling with evolving confidence; 3) Detect objects via YOLO-WORLD; 4) Fuse scores with a spline function to identify high-confidence frames for downstream tasks." + ], + "image_footnote": [], + "bbox": [ + 174, + 88, + 823, + 271 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 Method", + "text_level": 1, + "bbox": [ + 171, + 347, + 272, + 363 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Although existing long-context VLM frameworks implement keyframe search for video QA tasks Liang et al. (2024); Park et al. (2024); Tan et al. (2024); Wang et al. (2024a,d); Yu et al. (2024), their computational efficiency and search accuracy remain suboptimal. To address this needle-in-a-haystack challenge Wang et al. (2025); Zhao et al. 
(2024), we propose a novel method VSLS that aligns the semantic relations between the text modality and video modality, enhancing the plausibility of logical reasoning and performance of downstream tasks.", + "bbox": [ + 169, + 369, + 826, + 452 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.1 Task Formulation", + "text_level": 1, + "bbox": [ + 171, + 463, + 338, + 476 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Given a video sequence $V = \\{f_t\\}_{t=1}^{N_v}$ with $N_v$ frames and a query $Q$ , the ideal temporal search framework aims to retrieve the minimal keyframe subset $V^K = \\{f_{m_i}\\}_{i=1}^K \\subseteq V$ with $K$ keyframes that satisfies:", + "bbox": [ + 169, + 479, + 823, + 523 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Conservation: The keyframe subset $V^K \\subseteq V$ must satisfy the answer consistency condition: $\\mathcal{A}(V^K, Q) = \\mathcal{A}(V, Q)$ , where $\\mathcal{A}(\\cdot)$ denotes the video QA function.", + "- Compactness: $V^K$ must be a minimal subset that preserves completeness, which means that no frame in $V^K$ can be removed without hindering the accuracy and efficiency of video QA." + ], + "bbox": [ + 171, + 529, + 825, + 587 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.2 Visual Semantic Logic Extraction", + "text_level": 1, + "bbox": [ + 171, + 595, + 446, + 609 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Starting from a question $Q$ and uniformly sampled frames $\\overline{V}_N$ from video $V$ , our goal is to extract key visual elements to answer $Q$ . We first classify the detected objects in $Q$ and $\\overline{V}_N$ into two categories:", + "bbox": [ + 169, + 614, + 826, + 646 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Key Objects: The main participants or references in the scene that the question explicitly or implicitly focuses on (e.g., \"person\", \"microphone\").", + "- Cue Objects: Secondary or contextual entities that help locate or disambiguate the Key Objects (e.g., \"book\", \"tiger painting\")." + ], + "bbox": [ + 171, + 648, + 825, + 707 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To further leverage semantic and logical links among these objects, we define a set of relations $\\mathcal{R} \\subseteq \\mathcal{O} \\times \\Delta \\times \\mathcal{O}$ , where each relation $r = (o_i, \\delta, o_j) \\in \\mathcal{R}$ , with $o_i, o_j \\in \\mathcal{O}$ denoting detected objects in the key and cue objects dataset, and $\\delta \\in \\Delta$ representing one of the following types of relations:", + "bbox": [ + 169, + 710, + 823, + 753 + ], + "page_idx": 2 + }, + { + "type": "table", + "img_path": "images/d47a19ff9a05498992989851401417220f3bb6fb49d58def00dde2c82c804a66.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
<table><tr><td>Spatial Co-occurrence</td><td>Attribute Dependency</td></tr>
<tr><td>o_i and o_j appear in the same frame, indicating co-occurrence or proximity. Example: “A person is standing beside a vase.” ⇒ (person, spatial, vase)</td><td>o_i and o_j share visual properties, e.g., color or size. Example: “A person wears a black shirt.” ⇒ (person, attribute, black shirt)</td></tr>
<tr><td>Temporal Proximity</td><td>Causal Order</td></tr>
<tr><td>o_i and o_j occur in close frames, linking sequences or transitions. Example: “After a dog entered the room, a cat entered.” ⇒ (dog, temporal, cat)</td><td>o_i and o_j follow a cause-effect or prerequisite order. Example: “A little girl broke the vase.” ⇒ (little girl, causal, pieces)</td></tr>
</table>
", + "bbox": [ + 173, + 756, + 818, + 909 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 935, + 503, + 946 + ], + "page_idx": 2 + }, + { + "type": "code", + "sub_type": "algorithm", + "code_caption": [ + "Algorithm 1: Visual Semantic-Logical Search" + ], + "code_body": "Function SemanticLogicalTemporalSearch(V,Q,K, $\\Delta_t,\\tau ,\\alpha ,\\gamma$ $\\mathcal{O},\\mathcal{R}\\gets$ ParseQuestion(Q) // Extract key/cue objects and relations \n $P\\leftarrow$ Uniform, $B\\leftarrow |V|,S\\leftarrow \\emptyset ,N_{v}\\leftarrow |V|$ // Initialize distribution and state \nwhile $B > 0$ and $|\\mathcal{O}| > 0$ do \n $k\\gets \\lfloor \\sqrt{B}\\rfloor ,G\\gets$ Grid(Sample $(P,k^2)$ ) // Adaptive grid sampling \n $\\Omega \\gets$ DetectObjects(G) // Detect objects in sampled frames \nforeach $t\\in G$ do \n $C_t\\gets$ CalculateBaseScore( $\\Omega_t$ ) // Base detection confidence \nforeach $r_{type}\\in \\mathcal{R}$ do \n $\\delta \\gets$ Processrelation(rtype, $\\Omega ,\\Delta_t,\\tau ,\\alpha ,\\gamma)$ //relations require distinct processing \n $C_t\\gets C_t + \\delta$ UpdateScores $(S,t,C_t)$ //Update global score registry \nDiffuseScores(S,w) // Temporal context propagation \n $P\\gets$ NormalizeDistribution(S), $B\\gets B - k^{2}$ // Update sampling distribution \nforeach $g\\in \\mathrm{TopK}(S,K)$ do \nif $\\Omega [g]\\cap \\mathcal{O}\\neq \\emptyset$ then // Remove identified key objects \n $\\begin{array}{rlrl} & {\\mathcal{O}} & {\\leftarrow \\mathcal{O}\\backslash \\Omega [g]} & {} \\end{array}$", + "bbox": [ + 173, + 111, + 823, + 368 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The choice of these four relations draws on core concepts in linguistics and logic Cohen (1968); Sowa (2000); Talmy (2000), which identify spatial, temporal, attributive, and causal aspects as fundamental for structuring, perceiving, and communicating information about events and states. For more details on this selection, please see appendix A for reference. As shown in Figure 1, we construct semantic-logical relations that support a broad range of question-answering tasks. Specifically, questions involving temporal queries (when does $X$ happen?), causal reasoning (why did $Y$ occur?\"), attribute dependence (What is the person wearing sunglasses doing?), or spatial constraints (Who is standing next to the red car?) can be answered more reliably by incorporating these structured relations and contextual cues.", + "bbox": [ + 169, + 398, + 826, + 525 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2.3 Iterative Semantic-Logical Temporal Search", + "text_level": 1, + "bbox": [ + 171, + 535, + 522, + 550 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Based on the extracted key and cue objects and their logic relations, our algorithm iteratively searches for keyframes through semantic and logical reasoning, including four main stages: Frame Sampling (Sec. 2.3.1), Object Detection and Scoring (Sec. 2.3.2), Visual Semantic Logic Detection (Sec. 2.3.3), and Distribution Update (Sec. 2.3.4). 
The pseudocode is shown in Algorithm 1, and Algorithm 2 provides a more detailed version.", + "bbox": [ + 169, + 554, + 823, + 625 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2.3.1 Frame Sampling", + "text_level": 1, + "bbox": [ + 171, + 633, + 344, + 648 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To accelerate the search process, we avoid exhaustively scanning all $N_v$ video frames and instead employ a distributed sampling strategy. Let $N_v$ denote the total number of frames in the video, and $P$ be a uniformly initialized sampling distribution over all frames. The sampling process is then defined as:", + "bbox": [ + 169, + 651, + 823, + 705 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nI_{s} = \\operatorname{Sample}\\left(P \\odot N_{v}, N_{s}\\right), \\tag{1}\n$$\n", + "text_format": "latex", + "bbox": [ + 401, + 705, + 823, + 722 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\mathrm{Sample}(\\cdot, N_s)$ selects a subset of $N_{s}$ frames according to the distribution $P\\odot N_v$. To further leverage the detection capability of YOLO, we stack the sampled frames into a $k\\times k$ grid, which imposes a constraint on the sample size $N_{s}$. Specifically, we require:", + "bbox": [ + 169, + 726, + 823, + 768 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nN_{s} \\in \\{k^{2} \\mid k \\in \\mathbb{Z}\\} \\quad \\text{and} \\quad N_{s} < N_{v}. \\tag{2}\n$$\n", + "text_format": "latex", + "bbox": [ + 370, + 775, + 823, + 792 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In practice, this ensures that the number of sampled frames can be reshaped into a compact 2D grid for efficient processing. Although $P$ is initially uniform, it can be adapted over multiple rounds of sampling to focus on frames of higher interest in the video.", + "bbox": [ + 169, + 800, + 825, + 843 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2.3.2 Object Detection and Scoring", + "text_level": 1, + "bbox": [ + 171, + 851, + 431, + 867 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In this stage, we construct the detection search space by taking the union of both key objects and cue objects. For each iteration, we detect objects on the $N_{s}$ sampled frames using a lightweight model like YOLO-WORLD Cheng et al. (2024a) for high efficiency and score the frames based on detection", + "bbox": [ + 169, + 869, + 823, + 912 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/f4ce9972802824b1e904f47e93a3d933772e13b6530cb6d80f8fb6b800e4bad8.jpg", + "image_caption": [ + "Figure 3: Sample weight evolution under VSLS optimization for keyframe selection. Top: 16 iterations show progressive convergence toward Ground Truth (red). Bottom: 15 iterations demonstrate similar alignment. Yellow highlights indicate precise matches between algorithm outputs (green) and manual annotations." + ], + "image_footnote": [], + "bbox": [ + 171, + 92, + 823, + 229 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "confidence. Specifically, let $\\Omega_t$ be the set of detected objects in the frame at time $t$, $c_o$ the confidence of each detected object, and $w_o$ the corresponding weight. 
We define the frame score as:", + "bbox": [ + 169, + 301, + 823, + 333 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nC_{t} = \\max_{o \\in \\Omega_{t}}\\left(c_{o} \\cdot w_{o}\\right). \\tag{3}\n$$\n", + "text_format": "latex", + "bbox": [ + 428, + 338, + 825, + 362 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "If the confidence score of any key object exceeds a predefined threshold, the frame is added to a list, thereby maintaining a record of frames where crucial targets have been identified for subsequent processing.", + "bbox": [ + 169, + 369, + 826, + 401 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "2.3.3 Visual Semantic Logic Detection", + "text_level": 1, + "bbox": [ + 171, + 407, + 452, + 422 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Beyond individual object detection and frame-level scoring, we refine each frame's confidence score by modeling higher-order object relations. Let $\\mathcal{R}$ be the set of relations, where each $r\\in \\mathcal{R}$ involves a pair $(o_1,o_2)$ and is labeled by a type $r_{\\mathrm{type}}$. Denote $C_t$ as the confidence score at time $t$, with a global scaling factor $\\alpha$ and a relation-specific weight $\\gamma_{r_{\\mathrm{type}}}$ controlling each logic type's impact. The refined confidence $C_t^{(r)}$ after applying relation $r$ is defined as:", + "bbox": [ + 169, + 426, + 823, + 502 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nC_{t}^{(r)} = C_{t} + \\alpha \\cdot \\gamma_{r_{\\text{type}}}. \\tag{4}\n$$\n", + "text_format": "latex", + "bbox": [ + 421, + 510, + 825, + 532 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Spatial Relation. A spatial relation enforces that two objects $o_1$ and $o_2$ must co-occur in the same frame. Let $\\Omega_t$ be the set of detected objects in frame $t$. If both $o_1 \\in \\Omega_t$ and $o_2 \\in \\Omega_t$, then the corresponding frame confidence is updated as:", + "bbox": [ + 169, + 537, + 823, + 580 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nC_{t} \\leftarrow C_{t} + \\alpha \\cdot \\gamma_{\\text{spatial}}. \\tag{5}\n$$\n", + "text_format": "latex", + "bbox": [ + 421, + 589, + 825, + 606 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Attribute Relation. An attribute relation is satisfied when $o_1$ and $o_2$ share sufficient bounding-box overlap in the same frame. Let overlap be the ratio of their intersection area to the minimum of their individual bounding-box areas. If the overlap ratio exceeds a predefined threshold $\\tau$ ($\\tau = 0.5$ in our experimental setting), we increase the frame confidence:", + "bbox": [ + 169, + 613, + 823, + 671 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nC_{t} \\leftarrow C_{t} + \\alpha \\cdot \\gamma_{\\text{attribute}}. \\tag{6}\n$$\n", + "text_format": "latex", + "bbox": [ + 416, + 679, + 825, + 694 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Time Relation. A time relation checks whether two objects appear in temporally close frames. Suppose $t_i$ and $t_j$ ($t_i \\leq t_j$) are sampled such that $|t_j - t_i| < \\Delta_t$, where $\\Delta_t$ is a threshold (e.g., 
5 frames in our experimental setting), if $o_1$ occurs in frame $t_i$ and $o_2$ in frame $t_j$, then the confidences of both frames are updated:", + "bbox": [ + 169, + 703, + 826, + 758 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nC_{t_{i}} \\leftarrow C_{t_{i}} + \\alpha \\cdot \\gamma_{\\text{time}}, \\quad C_{t_{j}} \\leftarrow C_{t_{j}} + \\alpha \\cdot \\gamma_{\\text{time}}. \\tag{7}\n$$\n", + "text_format": "latex", + "bbox": [ + 336, + 767, + 825, + 785 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Causal Relation. A causal relation models an ordering constraint, enforcing that $o_1$ must appear at an earlier time than $o_2$. Specifically, if $o_1 \\in \\Omega_{t_i}$ and $o_2 \\in \\Omega_{t_j}$ with $t_i < t_j$, we update the confidence of frames $t_i$ and $t_j$ by:", + "bbox": [ + 169, + 792, + 823, + 835 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nC_{t_{i}} \\leftarrow C_{t_{i}} + \\alpha \\cdot \\gamma_{\\text{causal}}, \\quad C_{t_{j}} \\leftarrow C_{t_{j}} + \\alpha \\cdot \\gamma_{\\text{causal}}. \\tag{8}\n$$\n", + "text_format": "latex", + "bbox": [ + 328, + 843, + 825, + 861 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Through this scoring mechanism, frames with detected relations will have greater confidence and are more likely to be retrieved as keyframes for the given query and video. We have also conducted hyperparameter search experiments and found that $\\alpha = 0.3$ (from 0.3, 0.5, 0.7, 1.0) and $\\gamma_{r_{\\mathrm{type}}} = 0.5$ achieve the best results across different datasets.", + "bbox": [ + 169, + 868, + 825, + 922 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 936, + 503, + 946 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "2.3.4 Distribution Update", + "text_level": 1, + "bbox": [ + 171, + 90, + 366, + 106 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "After each iteration of frame sampling, we merge newly obtained frame confidences into the global score distribution $\\{S_f\\}$ spanning all frames $f = 1,2,\\dots ,N_v$. When a frame $f$ is selected for detection, its score is set to the confidence value $C_f$, and the visitation counter $N_{v,f}$ is reset to 0. To incorporate temporal context, we diffuse this updated score to neighboring frames within a window of size $w$. Denoting each nearby index by $f\\pm \\delta$ (for $\\delta \\in [-w,w]$), we apply:", + "bbox": [ + 169, + 109, + 823, + 179 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nS_{f \\pm \\delta} \\leftarrow \\max\\left(S_{f \\pm \\delta}, \\frac{S_{f}}{1 + |\\delta|}\\right). \\tag{9}\n$$\n", + "text_format": "latex", + "bbox": [ + 385, + 183, + 825, + 217 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In this way, high-confidence frames raise the scores of close-by frames, reflecting temporal continuity. Following these local updates, the sampling distribution $P$ is refined using spline interpolation, and then normalized. This iteration proceeds until either the search budget $B$ is exhausted or all key objects have been successfully identified. The visualization of the distribution over different iterations can be seen in Figure 3. 
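Putting Eqs. (4)-(9) together, a minimal sketch of the per-frame bookkeeping is given below: a verified relation adds an alpha-scaled gamma bonus to the frame score, and Eq. (9) then diffuses updated scores to temporal neighbours before renormalization. The alpha value and uniform gamma weights follow the hyperparameters reported above; the window size, names, and array layout are illustrative, and the spline-interpolation refinement is omitted.

```python
import numpy as np

ALPHA = 0.3
GAMMA = {"spatial": 0.5, "temporal": 0.5, "attribute": 0.5, "causal": 0.5}

def apply_relation_bonus(scores: np.ndarray, t: int, rel_type: str) -> None:
    """Eqs. (4)-(8): add alpha * gamma_rtype when relation rel_type is
    verified at frame t."""
    scores[t] += ALPHA * GAMMA[rel_type]

def diffuse_scores(scores: np.ndarray, updated: list[int], w: int = 5) -> None:
    """Eq. (9): S[f+d] <- max(S[f+d], S[f] / (1 + |d|)) for |d| <= w."""
    n = len(scores)
    for f in updated:
        for d in range(-w, w + 1):
            j = f + d
            if 0 <= j < n:
                scores[j] = max(scores[j], scores[f] / (1.0 + abs(d)))

def to_distribution(scores: np.ndarray) -> np.ndarray:
    """Normalize diffused scores into the next-round sampling distribution P."""
    s = np.clip(scores, 1e-8, None)  # keep every frame reachable
    return s / s.sum()
```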
Finally, the method outputs the top $K$ frames according to their terminal scores.", + "bbox": [ + 169, + 220, + 826, + 291 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3 Experiment", + "text_level": 1, + "bbox": [ + 171, + 297, + 305, + 313 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.1 Benchmark Datasets", + "text_level": 1, + "bbox": [ + 171, + 316, + 357, + 330 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The proposed VSLS is systematically evaluated across four benchmark datasets: a) LONGVIDEOBENCH Ye et al. (2025a) for assessing long-context video-language comprehension capabilities; b) Video-MME Fu et al. (2024) as the first comprehensive benchmark for multimodal video analytics; c) HAYSTACK-LVBENCH, extended from LONGVIDEOBENCH with human-annotated frame index answers; and d) HAYSTACK-EGO4D, derived from EGO4D with similar annotations. While LONGVIDEOBENCH and Video-MME measure performance enhancement in QA accuracy, HAYSTACK-EGO4D and HAYSTACK-LVBENCH quantitatively evaluate keyframe selection accuracy through recall and precision metrics. Further details of the datasets are provided in Appendix D.", + "bbox": [ + 169, + 335, + 826, + 448 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.2 Evaluation Metrics", + "text_level": 1, + "bbox": [ + 171, + 454, + 346, + 467 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.2.1 Evaluation Metrics for Search Utility", + "text_level": 1, + "bbox": [ + 171, + 473, + 483, + 488 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Our assessment framework emphasizes both effectiveness and efficiency. For search effectiveness, we use three metrics to compare model-predicted keyframes with human annotations, considering both individual frames and full sets, addressing the possibility of multiple valid keyframe sets per query. For frame-level comparison, we evaluate the alignment between a predicted frame $f_{\\mathrm{pt}}$ and a human-annotated frame $f_{\\mathrm{gt}}$ from two perspectives:", + "bbox": [ + 169, + 489, + 823, + 560 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Temporal coverage evaluates the coverage of ground truth frames by predicted frames from the temporal perspective, which can be described as:", + "bbox": [ + 169, + 563, + 823, + 590 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nT_{\\text{cover}}\\left(T_{\\mathrm{pt}}, T_{\\mathrm{gt}}\\right) = \\frac{\\sum_{i=1}^{|N_{\\mathrm{gt}}|} \\mathbb{I}\\left[\\min_{j}\\left|t_{\\mathrm{gt}}^{i} - t_{\\mathrm{pt}}^{j}\\right| \\leq \\delta\\right]}{|N_{\\mathrm{gt}}|}, \\tag{10}\n$$\n", + "text_format": "latex", + "bbox": [ + 344, + 594, + 825, + 648 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $T_{\\mathrm{pt}}$ and $T_{\\mathrm{gt}}$ denote the sets of predicted and ground truth timestamps, respectively. Here, $|N_{\\mathrm{gt}}|$ is the number of ground truth frames, $t_{\\mathrm{gt}}^i$ and $t_{\\mathrm{pt}}^j$ are the $i$-th ground truth and $j$-th predicted timestamps, respectively. $\\delta$ is the temporal similarity threshold defining the maximum allowed time deviation, and $\\mathbb{I}[\\cdot]$ is the indicator function, returning 1 if the condition holds and 0 otherwise.", + "bbox": [ + 169, + 657, + 823, + 717 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Visual Similarity is measured by the Structural Similarity Index (SSIM) Brunet et al. 
(2012), capturing structural detail, luminance, and contrast between $f_{\\mathrm{pt}}$ and $f_{\\mathrm{gt}}$. For set-to-set comparison, the key challenge is defining inter-set similarity. We adopt Precision $P$ and Recall $R$ as complementary metrics: Precision checks whether each predicted frame matches any reference frame, while Recall ensures that all reference frames are represented. Given the ground truth set $F_{\\mathrm{gt}} = \\{f_{\\mathrm{gt}}^{j}\\}_{j=1}^{n}$ and the predicted set $F_{\\mathrm{pt}} = \\{f_{\\mathrm{pt}}^{i}\\}_{i=1}^{m}$, we define the multimodal retrieval quality metrics as follows:", + "bbox": [ + 169, + 719, + 823, + 808 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\{ \\begin{array}{l} P\\left(F_{\\mathrm{pt}}, F_{\\mathrm{gt}}\\right) = \\frac{1}{\\left|F_{\\mathrm{pt}}\\right|} \\sum_{f_{\\mathrm{pt}}^{i} \\in F_{\\mathrm{pt}}} \\max_{f_{\\mathrm{gt}}^{j} \\in F_{\\mathrm{gt}}} \\phi\\left(f_{\\mathrm{pt}}^{i}, f_{\\mathrm{gt}}^{j}\\right), \\\\ R\\left(F_{\\mathrm{pt}}, F_{\\mathrm{gt}}\\right) = \\frac{1}{\\left|F_{\\mathrm{gt}}\\right|} \\sum_{f_{\\mathrm{gt}}^{j} \\in F_{\\mathrm{gt}}} \\max_{f_{\\mathrm{pt}}^{i} \\in F_{\\mathrm{pt}}} \\phi\\left(f_{\\mathrm{gt}}^{j}, f_{\\mathrm{pt}}^{i}\\right), \\end{array} \\right. \\tag{11a}\n$$\n", + "text_format": "latex", + "bbox": [ + 348, + 818, + 825, + 902 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $\\phi (\\cdot ,\\cdot)$ represents an extensible multimodal similarity metric function.", + "bbox": [ + 174, + 906, + 676, + 921 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 936, + 504, + 946 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/97000cb287c16fdfdb7eb5b57ac87b488b53bdf10f7712a12a7a1d22a55787d8.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
<table><tr><td rowspan="2">Method</td><td rowspan="2">Training Required</td><td colspan="4">Searching Efficiency</td><td colspan="2">Overall Task Efficiency</td></tr>
<tr><td>Matching</td><td>Iteration</td><td>TFLOPs ↓</td><td>Latency (sec) ↓</td><td>Latency (sec) ↓</td><td>Acc ↑</td></tr>
<tr><td colspan="8">Static Frame Sampling</td></tr>
<tr><td>UNIFORM-8 Ye et al. (2025a)</td><td>Training-Based</td><td>N/A</td><td>N/A</td><td>N/A</td><td>0.2</td><td>3.8</td><td>53.7</td></tr>
<tr><td colspan="8">Dense Retrieval</td></tr>
<tr><td>VIDEOAGENT Fan et al. (2024)</td><td>Training-Based</td><td>CLIP-1B Radford et al. (2021)</td><td>840</td><td>536.5</td><td>30.2</td><td>34.9</td><td>49.2</td></tr>
<tr><td>T*-RETRIEVAL Ye et al. (2025b)</td><td>Training-Based</td><td>YOLO-WORLD-110M</td><td>840</td><td>216.1</td><td>28.6</td><td>32.2</td><td>57.3</td></tr>
<tr><td colspan="8">Temporal Search</td></tr>
<tr><td>T*-ATTENTION Ye et al. (2025b)</td><td>Training-Based</td><td>N/A</td><td>N/A</td><td>88.9</td><td>13.7</td><td>17.3</td><td>59.3</td></tr>
<tr><td>T*-DETECTOR Ye et al. (2025b)</td><td>Training-Free</td><td>YOLO-WORLD-110M</td><td>43</td><td>31.7</td><td>7.3</td><td>11.1</td><td>59.8</td></tr>
<tr><td>VSLS (OURS)-DETECTOR</td><td>Training-Free</td><td>YOLO-WORLD-110M</td><td>49</td><td>33.3</td><td>7.8</td><td>11.6</td><td>61.5</td></tr>
</table>
", + "bbox": [ + 173, + 89, + 823, + 202 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 1: Evaluation of performance metrics across the LV-HAYSTACK benchmark, presenting both search efficiency and end-to-end processing overhead (combining search and inference stages).", + "bbox": [ + 169, + 209, + 823, + 238 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "3.2.2 Evaluation Metrics for Search efficiency", + "text_level": 1, + "bbox": [ + 171, + 247, + 503, + 262 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Existing studies Fan et al. (2024); Park et al. (2024); Wang et al. (2024a,d); Wu and Xie (2023) have mainly concentrated on optimizing task-specific performance metrics while neglecting computational efficiency in temporal search operations. To systematically analyze this dimension, our evaluation framework incorporates two criteria: 1) FLOPs representing arithmetic operation complexity, and 2) Latency recording real-world execution duration.", + "bbox": [ + 169, + 265, + 826, + 335 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "3.3 Evaluation of Search Framework efficiency", + "text_level": 1, + "bbox": [ + 171, + 347, + 511, + 362 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Current approaches for keyframe selection can be broadly categorized into three paradigms: statistic-based frame sampling, dense feature retrieval-based selection, and temporal search-based methods. As shown in Table 1, while uniform sampling achieves the fastest processing speed, its ignorance of frame semantics severely limits downstream task effectiveness. Although dense feature retrieval methods attain moderate accuracy improvements (57.3%), their exhaustive frame processing demands $4.2 \\times$ more TFLOPs and introduces $4.5 \\times$ higher latency than our temporal search approach. Crucially, our method introduces four visual semantic logic detectors during temporal search while maintaining comparable execution time to T* methods. This strategic design elevates downstream task accuracy to $61.5\\%$ , achieving the best performance-efficiency trade-off.", + "bbox": [ + 169, + 367, + 826, + 492 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "3.4 Visual Semantic Logic Search Performance", + "text_level": 1, + "bbox": [ + 171, + 503, + 514, + 517 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "As demonstrated in Table 2, we evaluate VSLS on LONGVIDEOBENCH from two critical perspectives: visual similarity (measured by precision and recall) and temporal coverage. Our method achieves state-of-the-art performance across all metrics. Specifically, under the 32-frame setting, VSLS attains a precision of $74.5\\%$ and recall of $92.5\\%$ , outperforming all baselines in visual similarity. More notably, the temporal coverage of VSLS reaches $41.4\\%$ , surpassing the second-best method ( $T* at 36.5\\%$ ) by $13.4\\%$ —the largest margin among all comparisons. This significant improvement highlights the effectiveness of our visual semantic logic detection modules in identifying query-relevant keyframes with both semantic alignment and temporal completeness.", + "bbox": [ + 169, + 523, + 826, + 633 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "These results empirically support our core hypothesis: leveraging semantic and logical cues from text queries enables precise detection of relevant video frames. 
Improvements in visual similarity and temporal coverage confirm that VSLS effectively captures keyframes while preserving temporal coherence through visual-logical alignment.", + "bbox": [ + 169, + 640, + 500, + 738 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "3.5 Downstream Video QA Performance", + "text_level": 1, + "bbox": [ + 171, + 750, + 470, + 763 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "To demonstrate the advantages of VSLS, we evaluate downstream video QA performance on LONGVIDEOBENCH and VIDEO-MME. As shown in Table 3, videos are grouped by length into Short, Medium, and Long (15-3600s, up to 60 mins). VSLS consistently achieves the highest accuracy in the long-video category across different frame counts and QA models. Compared to the baseline T*, incorporating our visual semantic logic relations (Figure 1) yields substantial gains.", + "bbox": [ + 169, + 768, + 498, + 906 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/9418dbcf790187e3dd3a1499947366d89992e9107a4f5b6b4e378bd6721a2cc2.jpg", + "table_caption": [ + "Table 2: Search utility results on LONGVIDEOBENCH. Best scores in the 8-frame setting are underlined, and in the 32-frame setting are bold. Gray indicates results from the original paper." + ], + "table_footnote": [ + "These results confirm that modeling visual-logical relations is key to effective QA on long videos." + ], + "table_body": "
<table><tr><td>Method</td><td>Frame</td><td colspan=3>LONGVIDEOBENCH</td></tr><tr><td></td><td></td><td>Precision ↑</td><td>Recall ↑</td><td>TC ↑</td></tr><tr><td colspan=5>Static Frame Sampling Method</td></tr><tr><td>UNIFORM Ye et al. (2025a)</td><td>8</td><td>56.0</td><td>72.0</td><td>6.3</td></tr><tr><td>UNIFORM</td><td>8</td><td>60.7</td><td>80.4</td><td>4.7</td></tr><tr><td>UNIFORM</td><td>32</td><td>58.7</td><td>81.6</td><td>24.9</td></tr><tr><td>UNIFORM</td><td>32</td><td>60.2</td><td>85.0</td><td>8.1</td></tr><tr><td colspan=5>Dense Retrieval Method</td></tr><tr><td>VIDEOAGENT Fan et al. (2024)</td><td>10.1</td><td>58.8</td><td>73.2</td><td>8.5</td></tr><tr><td>RETRIEVAL-BASED Ye et al. (2025b)</td><td>8</td><td>63.1</td><td>65.5</td><td>6.3</td></tr><tr><td>RETRIEVAL-BASED</td><td>32</td><td>59.9</td><td>80.8</td><td>21.8</td></tr><tr><td colspan=5>Temporal Searching Method</td></tr><tr><td>T* Ye et al. (2025b)</td><td>8</td><td>58.4</td><td>72.7</td><td>7.1</td></tr><tr><td>T*</td><td>8</td><td>75.3</td><td>88.2</td><td>26.2</td></tr><tr><td>VSLS (ours)</td><td>8</td><td>75.6</td><td>88.6</td><td>26.3</td></tr><tr><td>T*</td><td>32</td><td>58.3</td><td>83.2</td><td>28.2</td></tr><tr><td>T*</td><td>32</td><td>74.0</td><td>90.3</td><td>36.5</td></tr><tr><td>VSLS (ours)</td><td>32</td><td>74.5</td><td>92.5</td><td>41.4</td></tr></table>
", + "bbox": [ + 509, + 719, + 826, + 892 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/3269a0ba4ba415d7370c101f6a7dce166285be0b34ece8282452072c7694fb9a.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
<table><tr><td colspan=5>LONGVIDEOBENCH</td><td colspan=4>VIDEO-MME</td></tr><tr><td>Model and Size</td><td>Frame</td><td colspan=3>Video Length</td><td>Model and Size</td><td>Frame</td><td colspan=2>Video Length</td></tr><tr><td></td><td></td><td>Long 900-3600s</td><td>Medium 180-600s</td><td>Short 15-60s</td><td></td><td></td><td>Long 30-60min</td><td>Medium 4-15min</td></tr><tr><td>GPT-4o Hurst et al. (2024)</td><td>8</td><td>47.1</td><td>49.4</td><td>67.3</td><td>GPT-4o</td><td>8</td><td>55.2</td><td>60.2</td></tr><tr><td>GPT-4o + T*</td><td>8</td><td>49.1</td><td>56.2</td><td>68.0</td><td>GPT-4o + T*</td><td>8</td><td>55.2</td><td>61.2</td></tr><tr><td>GPT-4o + VSLS (ours)</td><td>8</td><td>51.2</td><td>58.9</td><td>74</td><td>GPT-4o + VSLS (ours)</td><td>8</td><td>56.9</td><td>60.7</td></tr><tr><td>INTERNVL 2.5-78B Chen et al. (2024d)</td><td>8</td><td>55.7</td><td>57.3</td><td>74.0</td><td>INTERNVL 2.5-78B</td><td>8</td><td>52.6</td><td>55.5</td></tr><tr><td>INTERNVL 2.5-78B + VSLS (ours)</td><td>8</td><td>58.0</td><td>61.5</td><td>74.0</td><td>INTERNVL 2.5-78B + VSLS (ours)</td><td>8</td><td>57.7</td><td>57.5</td></tr><tr><td>GPT-4o</td><td>32</td><td>53.8</td><td>56.5</td><td>74.0</td><td>GPT-4o</td><td>32</td><td>55.2</td><td>61.0</td></tr><tr><td>GPT-4o + T*</td><td>32</td><td>55.3</td><td>58.8</td><td>72.0</td><td>GPT-4o + T*</td><td>32</td><td>55.2</td><td>61.6</td></tr><tr><td>GPT-4o + VSLS (ours)</td><td>32</td><td>54.2</td><td>60.0</td><td>76.0</td><td>GPT-4o + VSLS (ours)</td><td>32</td><td>55.2</td><td>61.9</td></tr><tr><td>LLAVA-ONEVISION-QWEN2-78B-OV</td><td>32</td><td>59.3</td><td>63.9</td><td>77.4</td><td>LLaVA-OneVision-78B</td><td>32</td><td>60.0</td><td>62.2</td></tr><tr><td>PLLAVA-34B</td><td>32</td><td>49.1</td><td>50.8</td><td>66.8</td><td>VIDEOLLAMA 2</td><td>32</td><td>57.6</td><td>59.9</td></tr><tr><td>LLAVA-VIDEO-78B-QWEN2</td><td>128</td><td>59.3</td><td>63.9</td><td>77.4</td><td>ORYX-1.5</td><td>128</td><td>59.3</td><td>65.3</td></tr><tr><td>MPLUG-OWL3-7B</td><td>128</td><td>53.9</td><td>58.8</td><td>73.7</td><td>ARIA-8x3.5B</td><td>256</td><td>58.8</td><td>67.0</td></tr><tr><td>GPT-4o (0513)</td><td>256</td><td>61.6</td><td>66.7</td><td>76.8</td><td>GEMINI-1.5-Pro (0615)</td><td>1/0.5 fps</td><td>67.4</td><td>74.3</td></tr></table>
", + "bbox": [ + 173, + 89, + 823, + 268 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 3: Downstream task evaluation results on two benchmarks. All accuracy scores (\\%) in black are from our replication. We also cite the reported accuracy of SOTA models in gray (noting that their settings may differ and results may not be reproducible), along with their number of frames used for QA inference, for full transparency.", + "bbox": [ + 169, + 275, + 823, + 333 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4 Analysis", + "text_level": 1, + "bbox": [ + 171, + 343, + 277, + 359 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.1 Coverage Analysis of Semantic-Logical Relations", + "text_level": 1, + "bbox": [ + 169, + 369, + 553, + 383 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "To ascertain the practical applicability and coverage of our defined semantic-logical relations (spatial, temporal, attribute, and causal), we conducted an analysis of their detection across all queries in the LongVideoBench and VideoMME datasets. Our findings reveal a crucial insight: for every question posed within these extensive VQA benchmarks, our query analysis module successfully identified and mapped the query to at least one of the four defined logical relation types. This empirical result supports the completeness of our proposed relation set for interpreting the semantic and logical intent inherent in these VQA tasks.", + "bbox": [ + 169, + 395, + 826, + 492 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.2 Time Complexity", + "text_level": 1, + "bbox": [ + 171, + 505, + 334, + 520 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The proposed framework consists of two stages. First, VLMs such as LLAVA-7B and GPT-40 extract a semantic set $S$ from a video $V$ with $n$ frames. $S$ includes target objects, cue objects, and their relations, with their size constrained by prompt design. In the second stage, keyframe identification is performed via a heuristic search: $k$ candidates are iteratively selected using a scoring function $h(\\cdot ,S)$ . The score distribution scores $[n]$ is dynamically refined using outputs from the YOLO-WORLD detector.", + "bbox": [ + 169, + 525, + 500, + 676 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/70069406c3ee25f7805367e06c6d48170b97455f4be6514bb40965b70097b46f.jpg", + "image_caption": [ + "Figure 4: Average occurrences of detected semantic-logical relation types per question on the VideoMME and LongVideoBench datasets. Spatial relations are the most frequently identified, while all queries in both datasets triggered at least one of the four relation types." + ], + "image_footnote": [], + "bbox": [ + 511, + 530, + 821, + 675 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Our analysis focuses on YOLO-WORLD detections, the main computational bottleneck due to their reliance on deep neural networks. Reducing the number of detections improves efficiency without sacrificing accuracy. At each iteration, the detector processes $k$ selected frames to match objects and relations in $S$ , yielding $k$ detections. The", + "bbox": [ + 169, + 683, + 500, + 780 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "search stops when all targets are found or the iteration budget $\\min(1000, 0.1 \\times V_t)$ (with $V_t$ as the video duration in seconds) is exhausted. In the worst case (e.g., videos with $>10,000$ frames and no matches), the cap is 1,000 iterations. 
{ + "type": "text", + "text": "Experimental results also demonstrate that integrating relational information into the search algorithm incurs negligible computational overhead compared to the baseline T* approach. On the LV-HAYSTACK benchmark, the average iteration count increases from 42.94 (T*) to 48.82, a modest $13.69\\%$ rise in time cost.", + "bbox": [ + 169, + 854, + 828, + 912 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.3 Ablation Study of Four Relations", + "text_level": 1, + "bbox": [ + 171, + 90, + 442, + 104 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Figure 4 illustrates the distribution of the four logic relation types across the LONGVIDEOBENCH and VIDEO-MME datasets, where spatial relations predominate, followed by attribute relations. In Table 4, we extract samples containing different relation types from LONGVIDEOBENCH to compare the object detection-based T* method with our VSLS approach. Experimental results demonstrate that VSLS achieves significant improvements across both image similarity metrics (SSIM Precision and SSIM Recall). Additionally, temporal coverage shows marked enhance", + "bbox": [ + 169, + 118, + 460, + 311 + ], + "page_idx": 8 + }, + { + "type": "table", + "img_path": "images/cb19cfb9dbee0c99a276e2b549faec542655bf11e4318afd9cb3e3b30c96200d.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
<table><tr><td>Logic Type</td><td>Method</td><td colspan=3>LONGVIDEOBENCH</td></tr><tr><td></td><td></td><td>Precision ↑</td><td>Recall ↑</td><td>TC ↑</td></tr><tr><td rowspan=2>Spatial</td><td>T*</td><td>72.9</td><td>88.7</td><td>37.5</td></tr><tr><td>VSLS (ours)</td><td>73.6</td><td>91.4</td><td>45.5</td></tr><tr><td rowspan=2>Attribute</td><td>T*</td><td>71.8</td><td>87.6</td><td>38.5</td></tr><tr><td>VSLS (ours)</td><td>72.7</td><td>90.9</td><td>42.1</td></tr><tr><td rowspan=2>Time</td><td>T*</td><td>76.7</td><td>89.2</td><td>37.3</td></tr><tr><td>VSLS (ours)</td><td>77.5</td><td>92.5</td><td>36.1</td></tr><tr><td rowspan=2>Causal</td><td>T*</td><td>74.7</td><td>92.4</td><td>38.6</td></tr><tr><td>VSLS (ours)</td><td>74.7</td><td>93.8</td><td>39.6</td></tr></table>
", + "bbox": [ + 477, + 98, + 818, + 243 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Table 4: Comparison of our method (VSLS) with the baseline across four logic relation types on LONGVIDEOBENCH. Precision: SSIM Precision; Recall: SSIM Recall; TC: Temporal Coverage.", + "bbox": [ + 467, + 250, + 826, + 306 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "ment for attribute, spatial, and causal relations, with spatial relations exhibiting the most substantial improvement (21.3% increase over T*). For the time relation category, we observe a slight decrease in temporal coverage, which may be attributed to the relative scarcity of time relation samples in the dataset, limiting the opportunity to demonstrate the advantages of VSLS. Nevertheless, Figure 1 provides visual evidence of how effectively leveraging time relations can facilitate downstream question-answering tasks.", + "bbox": [ + 169, + 311, + 823, + 395 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "5 Related Work", + "text_level": 1, + "bbox": [ + 171, + 411, + 321, + 426 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Challenges in Long Video Understanding: Long video understanding is inherently more challenging than short-video or image-based tasks due to its rich temporal dynamics and massive redundancy Qian et al. (2024); Zeng et al. (2024); Yu et al. (2019). The large number of frames increases both memory and computational requirements, making straightforward dense sampling infeasible. Moreover, crucial events may span distant timestamps, demanding high-capacity models to capture long-range dependencies Ranasinghe et al. (2025); Shi et al. (2024); Chen et al. (2024b); Weng et al. (2024). Meanwhile, the diverse and continuous visual content raises noise and distractors; thus, strategies to effectively locate or distill essential parts of the video are of primary importance Zhang et al. (2023); Cheng et al. (2024b); Xu et al. (2023); Ye et al. (2025b).", + "bbox": [ + 169, + 441, + 826, + 568 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Existing Solutions based on VLMs typically share three core ideas: 1) video sampling or retrieval for efficiency, 2) multi-stage or interactive reasoning to handle complex questions, and 3) compact representation to accommodate the VLM's limited context window. For instance, retrieval-based pipelines partition a video into segments and employ a learned or rule-based retriever to identify the relevant chunks before passing them to a VLM Pan et al. (2023); Choudhury et al. (2023, 2025). Other lines of research compress each frame into minimal tokens to reduce computational overhead Li et al. (2024); Chen et al. (2024a); Song et al. (2024), or adopt a streaming mechanism to propagate memory representations along the temporal axis Qian et al. (2024); Wu et al. (2022); Liu et al. (2024). Beyond these efficiency-oriented approaches, LLM/VLM-as-planner frameworks factorize the process into a series of perception queries, enabling an agent to fetch additional frame-level details if needed Wang et al. (2024b); Zhang et al. (2024); Liao et al. 
(2024).", + "bbox": [ + 169, + 573, + 826, + 726 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "6 Conclusion", + "text_level": 1, + "bbox": [ + 171, + 742, + 302, + 758 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In this paper, we present Visual Semantic-Logical Search (VSLS), a novel framework that efficiently selects semantically relevant keyframes for long video understanding by decomposing the logical relationships between textual queries and visual elements. Built on four defined logical dependencies (spatial co-occurrence, temporal proximity, attribute dependency, and causal order), VSLS significantly outperforms existing methods while sampling only $1.4\\%$ of video frames. The $8.7\\%$ improvement in GPT-4o's long video QA accuracy demonstrates that query-guided visual semantic logic search effectively bridges the gap between textual queries and visual content. VSLS's plug-and-play nature enables seamless integration with existing pipelines, making it practical for real-world applications. Future work could explore additional logical relations, learnable search methods, enhanced interpretability, and further downstream tasks.", + "bbox": [ + 169, + 772, + 826, + 912 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 173, + 89, + 269, + 106 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Dominique Brunet, Edward R. Vrscay, and Zhou Wang. On the mathematical properties of the structural similarity index. IEEE Transactions on Image Processing, 2012.", + "Jieneng Chen, Luoxin Ye, Ju He, Zhao-Yang Wang, Daniel Khashabi, and Alan Yuille. Llavolta: Efficient multi-modal models via stage-wise visual context compression. arXiv preprint arXiv:2406.20092, 2024a.", + "Jr-Jen Chen, Yu-Chien Liao, Hsi-Che Lin, Yu-Chu Yu, Yen-Chun Chen, and Yu-Chiang Frank Wang. ReXTime: A benchmark suite for reasoning-across-time in videos. In The Thirty-eighth Conference on Neural Information Processing Systems Datasets and Benchmarks Track, 2024b.", + "Lin Chen, Xin Wei, Jinsong Li, Xiaoyi Dong, Pan Zhang, Yuhang Zang, Zehui Chen, Haodong Duan, Zhenyu Tang, Li Yuan, et al. Sharegpt4video: Improving video understanding and generation with better captions. NeurIPS, 37:19472-19495, 2024c.", + "Zhe Chen, Jiannan Wu, Wenhai Wang, Weijie Su, Guo Chen, Sen Xing, Muyan Zhong, Qinglong Zhang, Xizhou Zhu, Lewei Lu, et al. Internvl: Scaling up vision foundation models and aligning for generic visual-linguistic tasks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 24185–24198, 2024d.", + "Tianheng Cheng, Lin Song, Yixiao Ge, Wenyu Liu, Xinggang Wang, and Ying Shan. Yolo-world: Real-time open-vocabulary object detection. CVPR, 2024a.", + "Zesen Cheng, Sicong Leng, Hang Zhang, Yifei Xin, Xin Li, Guanzheng Chen, Yongxin Zhu, Wenqi Zhang, Ziyang Luo, Deli Zhao, and Lidong Bing. Videollama 2: Advancing spatial-temporal modeling and audio understanding in video-llms, 2024b.", + "Rohan Choudhury, Koichiro Niinuma, Kris M Kitani, and Laszlo A Jeni. Zero-shot video question answering with procedural programs. arXiv preprint arXiv:2312.00937, 2023.", + "Rohan Choudhury, Koichiro Niinuma, Kris M. Kitani, and László A. Jeni. Video question answering with procedural programs. In ECCV, 2025.", + "David Cohen. 
Universals in linguistic theory, 1968.", + "Yue Fan, Xiaojian Ma, Rujie Wu, Yuntao Du, Jiaqi Li, Zhi Gao, and Qing Li. Videoagent: A memory-augmented multimodal agent for video understanding. ArXiv, abs/2403.11481, 2024.", + "Charles J Fillmore. The case for case. Bach and Harms (Ed.): Universals in Linguistic Theory, 1967.", + "Chaoyou Fu, Yuhan Dai, Yongdong Luo, Lei Li, Shuhuai Ren, Renrui Zhang, Zihan Wang, Chenyu Zhou, Yunhang Shen, Mengdan Zhang, Peixian Chen, Yanwei Li, Shaohui Lin, Sirui Zhao, Ke Li, Tong Xu, Xiawu Zheng, Enhong Chen, Rongrong Ji, and Xing Sun. Video-mme: The first-ever comprehensive evaluation benchmark of multi-modal llms in video analysis. ArXiv, abs/2405.21075, 2024.", + "Kristen Grauman, Andrew Westbury, Eugene Byrne, Zachary Chavis, Antonino Furnari, Rohit Girdhar, Jackson Hamburger, Hao Jiang, Miao Liu, Xingyu Liu, et al. Ego4d: Around the world in 3,000 hours of egocentric video. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18995-19012, 2022.", + "Aaron Hurst, Adam Lerer, Adam P Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihinda, Alan Hayes, Alec Radford, et al. Gpt-4o system card. arXiv preprint arXiv:2410.21276, 2024.", + "Minkuk Kim, Hyeon Bae Kim, Jinyoung Moon, Jinwoo Choi, and Seong Tae Kim. Do you remember? dense video captioning with cross-modal memory retrieval. In CVPR, 2024.", + "Yanwei Li, Chengyao Wang, and Jiaya Jia. Llama-vid: An image is worth 2 tokens in large language models. In ECCV, 2024.", + "Jianxin Liang, Xiaojun Meng, Yueqian Wang, Chang Liu, Qun Liu, and Dongyan Zhao. End-to-end video question answering with frame scoring mechanisms and adaptive sampling. ArXiv, abs/2407.15047, 2024.", + "Ruotong Liao, Max Eler, Huiyu Wang, Guangyao Zhai, Gengyuan Zhang, Yunpu Ma, and Volker Tresp. Videoinsta: Zero-shot long video understanding via informative spatial-temporal reasoning with llms. In EMNLP Findings, 2024.", + "Shilong Liu, Hao Cheng, Haotian Liu, Hao Zhang, Feng Li, Tianhe Ren, Xueyan Zou, Jianwei Yang, Hang Su, Jun Zhu, et al. Llava-plus: Learning to use tools for creating multimodal agents. In European Conference on Computer Vision, pages 126-142. Springer, 2024." + ], + "bbox": [ + 171, + 112, + 826, + 912 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "William C Mann and Sandra A Thompson. Rhetorical structure theory: Toward a functional theory of text organization. Text-interdisciplinary Journal for the Study of Discourse, 8(3):243-281, 1988.", + "Leland Gerson Neuberg. Causality: models, reasoning, and inference, by Judea Pearl, Cambridge University Press, 2000. Econometric Theory, 19(4):675-685, 2003.", + "Junting Pan, Ziyi Lin, Yuying Ge, Xiatian Zhu, Renrui Zhang, Yi Wang, Yu Qiao, and Hongsheng Li. Retrieving-to-answer: Zero-shot video question answering with frozen large language models. In ICCV Workshops, 2023.", + "Jong Sung Park, Kanchana Ranasinghe, Kumara Kahatapitiya, Wonjeong Ryoo, Donghyun Kim, and Michael S. Ryoo. Too many frames, not all useful: Efficient strategies for long-form video qa. ArXiv, abs/2406.09396, 2024.", + "Rui Qian, Xiaoyi Dong, Pan Zhang, Yuhang Zang, Shuangrui Ding, Dahua Lin, and Jiaqi Wang. Streaming long video understanding with large language models. 
In NeurIPS, 2024.", + "Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International Conference on Machine Learning, pages 8748-8763. PMLR, 2021.", + "Manjusha Rajan and Latha Parameswaran. Key frame extraction algorithm for surveillance videos using an evolutionary approach. Scientific Reports, 15(1):536, 2025.", + "Kanchana Ranasinghe, Xiang Li, Kumara Kahatapitiya, and Michael S Ryoo. Understanding long videos with multimodal language models. In ICLR, 2025.", + "Yudi Shi, Shangzhe Di, Qirui Chen, and Weidi Xie. Unlocking video-llm via agent-of-thoughts distillation. arXiv preprint arXiv:2412.01694, 2024.", + "Dingjie Song, Wenjun Wang, Shunian Chen, Xidong Wang, Michael Guan, and Benyou Wang. Less is more: A simple yet effective token reduction method for efficient multi-modal llms. arXiv preprint arXiv:2409.10994, 2024.", + "John F. Sowa. Knowledge Representation: Logical, Philosophical, and Computational Foundations. Brooks/Cole Publishing Co., Pacific Grove, CA, USA, 2000.", + "Leonard Talmy. Toward a Cognitive Semantics (Volume 1: Concept Structuring Systems; Volume 2: Typology and Process in Concept Structuring). MIT Press, Cambridge, MA, USA, 2000.", + "Reuben Tan, Ximeng Sun, Ping Hu, Jui-hsien Wang, Hanieh Deilamsalehy, Bryan A. Plummer, Bryan Russell, and Kate Saenko. Koala: Key frame-conditioned long video-llm. CVPR, 2024.", + "Yunlong Tang, Jing Bi, Siting Xu, Luchuan Song, Susan Liang, Teng Wang, Daoan Zhang, Jie An, Jingyang Lin, Rongyi Zhu, et al. Video understanding with large language models: A survey. arXiv preprint arXiv:2312.17432, 2023.", + "Hengyi Wang, Haizhou Shi, Shiwei Tan, Weiyi Qin, Wenyuan Wang, Tunyu Zhang, Akshay Nambi, Tanuja Ganu, and Hao Wang. Multimodal needle in a haystack: Benchmarking long-context capability of multimodal large language models, 2025.", + "Xiaohan Wang, Yuhui Zhang, Orr Zohar, and Serena Yeung-Levy. Videoagent: Long-form video understanding with large language model as agent. In ECCV, 2024a.", + "Xiaohan Wang, Yuhui Zhang, Orr Zohar, and Serena Yeung-Levy. Videoagent: Long-form video understanding with large language model as agent. In ECCV, pages 58-76. Springer, 2024b.", + "Zhanyu Wang, Longyue Wang, Zhen Zhao, Minghao Wu, Chenyang Lyu, Huayang Li, Deng Cai, Luping Zhou, Shuming Shi, and Zhaopeng Tu. Gpt4video: A unified multimodal large language model for instruction-followed understanding and safety-aware generation. In ACM MM, pages 3907-3916, 2024c.", + "Ziyang Wang, Shoubin Yu, Elias Stengel-Eskin, Jaehong Yoon, Feng Cheng, Gedas Bertasius, and Mohit Bansal. Videotree: Adaptive tree-based video representation for llm reasoning on long videos. ArXiv, abs/2405.19209, 2024d.", + "Ziyang Wang, Shoubin Yu, Elias Stengel-Eskin, Jaehong Yoon, Feng Cheng, Gedas Bertasius, and Mohit Bansal. Videotree: Adaptive tree-based video representation for llm reasoning on long videos. arXiv preprint arXiv:2405.19209, 2024e.", + "Yuetian Weng, Mingfei Han, Haoyu He, Xiaojun Chang, and Bohan Zhuang. Longvlm: Efficient long video understanding via large language models. In European Conference on Computer Vision, pages 453-470. 
Springer, 2024.", + "Chao-Yuan Wu, Yanghao Li, Karttikeya Mangalam, Haoqi Fan, Bo Xiong, Jitendra Malik, and Christoph Feichtenhofer. Memvit: Memory-augmented multiscale vision transformer for efficient long-term video recognition. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 13587-13597, 2022.", + "Penghao Wu and Saining Xie. V*: Guided visual search as a core mechanism in multimodal llms. CVPR, 2023.", + "Jiaqi Xu, Cuiling Lan, Wenxuan Xie, Xuejin Chen, and Yan Lu. Retrieval-based video language model for efficient long video question answering. arXiv preprint arXiv:2312.04931, 2023.", + "Jinhui Ye, Zihan Wang, and Haosen Sun. Longvideohaystack. https://huggingface.co/datasets/LVHaystack/LongVideoHaystack, 2025a. v1.0.", + "Jinhui Ye, Zihan Wang, Haosen Sun, Keshigeyan Chandrasegaran, Zane Durante, Cristobal Eyzaguirre, Yonatan Bisk, Juan Carlos Niebles, Ehsan Adeli, Li Fei-Fei, Jiajun Wu, and Manling Li. Re-thinking temporal search for long-form video understanding. In CVPR, 2025b.", + "Shukang Yin, Chaoyou Fu, Sirui Zhao, Ke Li, Xing Sun, Tong Xu, and Enhong Chen. A survey on multimodal large language models. National Science Review, 2024.", + "Sicheng Yu, Chengkai Jin, Huan Wang, Zhenghao Chen, Sheng Jin, Zhongrong Zuo, Xiaolei Xu, Zhenbang Sun, Bingni Zhang, Jiawei Wu, Hao Zhang, and Qianru Sun. Frame-voyager: Learning to query frames for video large language models. ArXiv, abs/2410.03226, 2024.", + "Zhou Yu, Dejing Xu, Jun Yu, Ting Yu, Zhou Zhao, Yueting Zhuang, and Dacheng Tao. Activitynet-qa: A dataset for understanding complex web videos via question answering. In AAAI, 2019.", + "Xiangyu Zeng, Kunchang Li, Chenting Wang, Xinhao Li, Tianxiang Jiang, Ziang Yan, Songze Li, Yansong Shi, Zhengrong Yue, Yi Wang, Yali Wang, Yu Qiao, and Limin Wang. Timesuite: Improving mllms for long video understanding via grounded tuning, 2024.", + "Hang Zhang, Xin Li, and Lidong Bing. Video-llama: An instruction-tuned audio-visual language model for video understanding. In EMNLP, 2023.", + "Lu Zhang, Tiancheng Zhao, Heting Ying, Yibo Ma, and Kyusong Lee. OmAgent: A multi-modal agent framework for complex video understanding with task divide-and-conquer. In EMNLP, 2024.", + "Zijia Zhao, Haoyu Lu, Yuqi Huo, Yifan Du, Tongtian Yue, Longteng Guo, Bingning Wang, Weipeng Chen, and Jing Liu. Needle in a video haystack: A scalable synthetic evaluator for video mllms. arXiv preprint arXiv:2406.09367, 2024.", + "Heqing Zou, Tianze Luo, Guiyang Xie, Fengmao Lv, Guangcong Wang, Junyang Chen, Zhuochen Wang, Hansheng Zhang, Huajian Zhang, et al. From seconds to hours: Reviewing multimodal large language models on comprehensive long video understanding. arXiv preprint arXiv:2409.18938, 2024." 
+ ], + "bbox": [ + 173, + 90, + 826, + 664 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Part I", + "text_level": 1, + "bbox": [ + 171, + 87, + 236, + 104 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Appendix", + "text_level": 1, + "bbox": [ + 171, + 117, + 315, + 146 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Table of Contents", + "text_level": 1, + "bbox": [ + 171, + 170, + 352, + 189 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "A Theoretical Underpinnings of Relation Categories 14", + "bbox": [ + 212, + 196, + 785, + 210 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "A.1 Linguistic Grounding 14", + "A.2 Logical Grounding 14", + "A.3 Pragmatic Completeness for VQA 14" + ], + "bbox": [ + 233, + 213, + 785, + 256 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "B Performance 15", + "C Analysis of the Impact of Search Frame Count 15" + ], + "bbox": [ + 212, + 268, + 785, + 309 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "D Details of Datasets 16", + "bbox": [ + 212, + 321, + 785, + 335 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "D.1 Details of VIDEO-MME 16", + "D.2 Details of LONGVIDEOBENCH 16", + "D.3 Details of LV-HAYSTACK 16", + "D.4 Details of EGO-4D 17" + ], + "bbox": [ + 233, + 337, + 785, + 395 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "E Detailed Algorithm 17", + "bbox": [ + 212, + 409, + 785, + 422 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "E.1 Algorithm Overview and Core Components 17", + "E.2 Implementation Considerations 19", + "E.3 Computational Complexity Analysis 19", + "E.4 Technical Implementation Details 19", + "E.5 Practical Application Examples 21", + "E.6 System Specifications for Reproducibility 21" + ], + "bbox": [ + 233, + 424, + 785, + 513 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "F Case Study of VSLS Keyframe Selection 21", + "G Iteration Analysis 22" + ], + "bbox": [ + 212, + 525, + 785, + 566 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "H Prompt 23", + "bbox": [ + 212, + 579, + 785, + 592 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "H.1 Prompt Template for Query Grounding 23", + "H.2 Prompt Template for Question Answering 23" + ], + "bbox": [ + 233, + 594, + 785, + 623 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "I Limitations 24", + "bbox": [ + 212, + 635, + 785, + 648 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "J Broader Impacts 24", + "bbox": [ + 212, + 662, + 785, + 675 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "J.1 Positive Impacts 24", + "J.2 Potential Considerations 24" + ], + "bbox": [ + 233, + 676, + 785, + 705 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 490, + 935, + 506, + 946 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "A Theoretical Underpinnings of Relation Categories", + "text_level": 1, + "bbox": [ + 171, + 89, + 627, + 107 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Our choice of the four relation categories—spatial, temporal, attribute, and causal—is grounded in foundational concepts from linguistics and 
logic. While achieving absolute “completeness” in describing the infinite complexity of the real world is a formidable challenge, this selection aims to describe core aspects of events, states, and the way humans conceptualize and communicate them.", + "bbox": [ + 169, + 121, + 823, + 178 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "A.1 Linguistic Grounding", + "text_level": 1, + "bbox": [ + 171, + 194, + 369, + 209 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Semantic Roles and Case Grammar: Theories like Fillmore's Case Grammar Fillmore (1967) analyze sentences in terms of semantic roles that nominals play in relation to the verb (the event).", + "bbox": [ + 169, + 215, + 823, + 243 + ], + "page_idx": 13 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Spatial relations directly correspond to roles like Locative (the location of an event or state) or Path (the trajectory of motion).", + "- Temporal relations align with Temporal roles, specifying when an event occurs or its duration.", + "- Attributes describe the properties of entities (participants) involved in these roles. While not direct case roles for verbs, they are fundamental for identifying and characterizing the \"who\" and \"what\" (e.g., Agent, Patient, Theme, Instrument) that possess these attributes during an event.", + "- Causal relations are central to understanding agency and event structure. Roles like Agent (the instigator of an action) or Cause (the non-volitional trigger of an event) highlight the importance of causality in linguistic descriptions of events." + ], + "bbox": [ + 171, + 244, + 825, + 372 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Lexical Semantics and Event Structure: Works in lexical semantics (e.g., following Pustejovsky Cohen (1968) on the generative lexicon, or Talmy Talmy (2000) on cognitive semantics) often decompose event meaning into fundamental components. Talmy Talmy (2000), for instance, extensively discusses how language structures concepts like space, time, and force dynamics (which inherently relate to causality). Events are situated in space and time, involve entities with specific attributes, and are often linked through causal chains (e.g., one action causing another, or an agent causing a change of state).", + "bbox": [ + 169, + 378, + 826, + 474 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Discourse Relations: Theories like Rhetorical Structure Theory (RST) Mann and Thompson (1988) identify relations that bind textual units together. Many of these fundamental relations are inherently temporal (e.g., Sequence), causal (e.g., Cause, Result, Purpose), or involve describing entities and their settings (which encompasses spatial and attributive information, often under relations like Elaboration or Background). This suggests that these four categories capture essential elements for constructing coherent descriptions and explanations, a core function of Video Question Answering (VQA).", + "bbox": [ + 169, + 479, + 825, + 579 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "A.2 Logical Grounding", + "text_level": 1, + "bbox": [ + 171, + 595, + 349, + 609 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Predicate Logic and Knowledge Representation: In formal logic and AI knowledge representation (e.g., Sowa Sowa (2000)), events and states are often represented using predicates with arguments that specify participants, locations, times, and properties. 
A typical event representation might implicitly or explicitly include Location(event, place), Time(event, time_interval), HasProperty(event, attribute_value), and relations like Causes(event1, event2). Our four categories provide a high-level abstraction over these common predicate types.", + "bbox": [ + 169, + 616, + 826, + 700 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Modal and Specialized Logics: Temporal Logic is specifically designed to reason about propositions qualified in terms of time.", + "bbox": [ + 171, + 705, + 826, + 733 + ], + "page_idx": 13 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Spatial Logic deals with reasoning about spatial properties and relations between entities.", + "- Logics of Action and Causality (e.g., situation calculus, event calculus, or Pearl's work on causality Neuberg (2003)) explicitly model how actions bring about changes and the causal dependencies between events." + ], + "bbox": [ + 171, + 734, + 823, + 790 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "A.3 Pragmatic Completeness for VQA", + "text_level": 1, + "bbox": [ + 171, + 808, + 454, + 823 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "From a pragmatic standpoint, particularly for VQA, these four relations address the core \"Wh-questions\" humans often ask to understand a scene or event:", + "bbox": [ + 169, + 834, + 826, + 862 + ], + "page_idx": 13 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- What/Who? (Identifies objects/entities, often distinguished by their attributes)", + "- Where? (Answered by spatial relations)", + "- When? (Answered by temporal relations)" + ], + "bbox": [ + 171, + 866, + 712, + 911 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "- Why/How did it happen? (Often answered by causal relations or a sequence of events linked temporally and spatially)", + "bbox": [ + 171, + 90, + 823, + 119 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "While more fine-grained relations (as in Action Genome) undoubtedly provide deeper semantic detail, our chosen set aims to provide a foundational, yet computationally manageable, framework for keyframe selection based on the most common semantic and logical inferences required for a broad range of video queries. They represent a level of abstraction that is both meaningful for human queries and feasible for current visual-language models to parse and verify.", + "bbox": [ + 169, + 125, + 823, + 195 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "In essence, these categories are not arbitrary but reflect fundamental dimensions along which events and states are structured, perceived, and communicated in language and reasoned about in logic. We believe they offer a robust and broadly applicable framework for the task at hand.", + "bbox": [ + 169, + 199, + 823, + 242 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "B Performance", + "text_level": 1, + "bbox": [ + 171, + 262, + 316, + 277 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Long-form video understanding presents unique challenges due to the complexity of temporal dynamics and cross-modal interactions in extended durations (900-3,600 seconds). Our comprehensive evaluation of the LVB-XL benchmark reveals significant performance gaps between existing approaches. 
While large-scale models like GPT-4o (32 frames) and INTERNVL 2.5-78B (16 frames) have demonstrated competence in short-video tasks, their direct application to long-form content yields suboptimal results (53.8% and 56.5% accuracy, respectively).", + "bbox": [ + 169, + 292, + 826, + 391 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Our Visual Semantic-Logical Search (VSLS) framework addresses these limitations. This advancement enables consistent performance improvements across different architecture scales, elevating GPT-4o to $54.2\\%$ $(+0.4\\mathrm{pp})$ and achieving a remarkable $62.4\\%$ $(+5.9\\mathrm{pp})$ for INTERNVL 2.5-78B on this benchmark. The comparative analysis further suggests that VSLS's gains become particularly pronounced when processing longer visual sequences, highlighting its effectiveness in modeling extended temporal contexts.", + "bbox": [ + 169, + 396, + 826, + 481 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "C Analysis of the Impact of Search Frame Count", + "text_level": 1, + "bbox": [ + 171, + 500, + 599, + 517 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/1c05698f5ebb877c71c8dc80fe7f27060313591cbce3eed88b06bbac45cee230.jpg", + "image_caption": [ + "Figure 5: Performance improvement with increasing search frames. VSLS consistently enhances accuracy and reaches near-human oracle performance at 64 frames." + ], + "image_footnote": [], + "bbox": [ + 256, + 540, + 743, + 781 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "This section investigates the impact of the number of search frames on the performance of Vision-Language Models (VLMs) on LONGVIDEOBENCH.", + "bbox": [ + 169, + 835, + 823, + 863 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Figure 5 in the T* framework study empirically demonstrates the non-monotonic relationship between input frame quantity and model accuracy on the LONGVIDEOBENCH XL benchmark. Through systematic experimentation across 18 state-of-the-art VLMs, this visualization reveals a critical", + "bbox": [ + 169, + 869, + 823, + 912 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "phenomenon: excessive frame inputs degrade performance for models lacking temporal redundancy mitigation mechanisms.", + "bbox": [ + 171, + 90, + 823, + 119 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "D Details of Datasets", + "text_level": 1, + "bbox": [ + 171, + 142, + 366, + 159 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "D.1 Details of Video-MME", + "text_level": 1, + "bbox": [ + 171, + 175, + 385, + 189 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "The Video-MME (Video Multi-Modal Evaluation) dataset represents the first comprehensive benchmark tailored to assess the capabilities of Vision-Language Models (VLMs) in video understanding. Aiming to address limitations in existing benchmarks, it emphasizes diversity, temporal complexity, and multi-modal integration while ensuring high-quality human annotations. The dataset contains 900 carefully curated videos across six primary domains—Knowledge, Film and Television, Sports Competition, Artistic Performance, Life Record, and Multilingual—with 30 fine-grained subcategories such as astronomy, esports, and documentaries. 
These videos vary significantly in duration, ranging from short clips (11 seconds) to long-form content (up to 1 hour), enabling robust evaluation across temporal scales.", + "bbox": [ + 169, + 202, + 826, + 328 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Each video is paired with expert-annotated multiple-choice questions (2,700 QA pairs in total), rigorously validated to ensure clarity and reliance on visual or multi-modal context. Questions span 12 task types, including action recognition, temporal reasoning, and domain-specific knowledge, with a focus on scenarios where answers cannot be inferred from text alone. To quantify temporal complexity, the dataset introduces certificate length analysis, revealing that answering questions often requires understanding extended video segments (e.g., median lengths of 26 seconds for short videos and 890.7 seconds for long videos), surpassing the demands of prior benchmarks like EGOSchema.", + "bbox": [ + 169, + 333, + 826, + 431 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "VIDEO-MME serves as a universal benchmark, applicable to both image- and video-focused MLLMs, and exposes key challenges for future research. These include improving architectures for long-sequence processing, developing datasets for complex temporal reasoning, and enhancing cross-modal alignment. By providing a rigorous evaluation framework, VIDEO-MME aims to drive progress toward MLLMs capable of understanding dynamic, real-world scenarios.", + "bbox": [ + 169, + 436, + 826, + 506 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "D.2 Details of LONGVIDEOBENCH", + "text_level": 1, + "bbox": [ + 171, + 526, + 431, + 540 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "The LONGVIDEOBENCH benchmark pioneers the evaluation of long-context interleaved video-language understanding in VLMs, addressing critical gaps in existing benchmarks through its focus on detailed retrieval and temporal reasoning over hour-long multimodal inputs. Designed to overcome the \"single-frame bias\" prevalent in prior video benchmarks, the novel referring reasoning paradigm enables models to locate and analyze specific contexts within extended sequences. The dataset comprises 3,763 web-sourced videos that span various themes - movies, news, life vlogs, and knowledge domains (including art, history, and STEM) - with durations progressively grouped into four levels: 8-15 seconds, 15-60 seconds, 3-10 minutes, and 15-60 minutes. Each video is paired with aligned subtitles, forming interleaved multimodal inputs that mimic real-world viewing scenarios.", + "bbox": [ + 169, + 553, + 826, + 678 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "The benchmark features 6,678 human-annotated multiple-choice questions categorized into 17 fine-grained task types across two levels: Perception (requiring object/attribute recognition in single scenes) and Relation (demanding temporal/causal reasoning across multiple scenes). Questions incorporate explicit referring queries (e.g., \"When the woman descends the rocky hill...\") that anchor reasoning to specific video moments, with an average question length of 43.5 words to ensure precision. 
Temporal complexity is quantified through duration-grouped analysis, where models must process up to 256 frames (at 1 fps) for hour-long videos, significantly exceeding the demands of predecessors like EGOSchema (180s videos).", + "bbox": [ + 169, + 684, + 826, + 796 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "D.3 Details of LV-HAYSTACK", + "text_level": 1, + "bbox": [ + 171, + 815, + 393, + 829 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "The LV-HAYSTACK benchmark establishes the first comprehensive evaluation framework for temporal search in long-form video understanding, addressing critical limitations in existing synthetic needle-in-haystack benchmarks through real-world video annotations and multi-dimensional evaluation metrics. Designed to assess models' ability to locate minimal keyframe sets (typically 1-5 frames) from hour-long videos containing tens of thousands of frames, the dataset comprises 3,874 human", + "bbox": [ + 169, + 842, + 826, + 912 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 490, + 935, + 509, + 946 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "annotated instances spanning 150 hours of video content across two distinct categories: egocentric videos from EGO4D (101 hours) and allocentric videos from LONGVIDEOBENCH (57.7 hours).", + "bbox": [ + 169, + 90, + 823, + 119 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Organized into HAYSTACK-EGO4D and HAYSTACK-LVBENCH subsets, the benchmark features videos averaging 24.8 minutes in length (max 60 minutes) with 44,717 frames per video. Each instance contains:", + "bbox": [ + 169, + 126, + 823, + 167 + ], + "page_idx": 16 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Expert-curated multi-choice questions requiring temporal reasoning (15.9 questions/video);", + "- Human-annotated keyframe sets (4.7 frames/question for egocentric, 1.8 frames/question for allocentric);", + "- Temporal and visual similarity metrics for precise search evaluation." + ], + "bbox": [ + 171, + 172, + 823, + 231 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "D.4 Details of EGO-4D", + "text_level": 1, + "bbox": [ + 171, + 251, + 346, + 265 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "The EGO4D (Egocentric Computer Vision Benchmark) dataset establishes a transformative foundation for advancing research in first-person visual perception through unprecedented scale, diversity, and multi-modal integration. Designed to overcome limitations in existing egocentric datasets, it captures 3,670 hours of unscripted daily activities from 931 participants across 74 global locations and 9 countries, spanning household, workplace, leisure, and outdoor scenarios. The dataset features $30+$ fine-grained activity categories including carpentry, social gaming, and meal preparation, with videos ranging from brief interactions (8-minute clips) to extended continuous recordings (up to 10 hours), enabling comprehensive analysis of long-term behavioral patterns.", + "bbox": [ + 169, + 277, + 826, + 390 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Each video is enriched with multi-modal annotations totaling 3.85 million dense textual narrations (13.2 sentences/minute), coupled with 3D environment meshes, eye gaze tracking, stereo vision, and synchronized multi-camera views. 
Rigorous privacy protocols ensure ethical data collection, with 612 hours containing unblurred faces/audio for social interaction studies. The benchmark suite introduces five core tasks organized across temporal dimensions:", + "bbox": [ + 169, + 396, + 826, + 465 + ], + "page_idx": 16 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Episodic Memory: Temporal localization of natural language queries (74K instances) and 3D object tracking using Matterport scans;", + "- Hand-Object Interaction: State change detection (1.3M annotations) with PNR (point-of-no-return) temporal localization;", + "- Social Understanding: Audio-visual diarisation (2,535h audio) and gaze-directed communication analysis;", + "- Action Forecasting: Anticipation of locomotion trajectories and object interactions." + ], + "bbox": [ + 171, + 470, + 825, + 571 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Quantitative analysis reveals the dataset's complexity: hand-object interactions involve 1,772 unique verbs and 4,336 nouns, while social scenarios contain 6.8 participant interactions per minute on average. Multi-modal fusion experiments demonstrate performance gains, with 3D environment context improving object localization accuracy by $18.7\\%$ compared to RGB-only baselines. State-of-the-art models achieve $68.9\\%$ accuracy in action anticipation tasks, yet struggle with long-term forecasting (41.2% accuracy for 5s predictions), highlighting critical challenges in temporal reasoning.", + "bbox": [ + 169, + 575, + 826, + 660 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "EGO4D's unique integration of egocentric video with complementary modalities (IMU data in 836h, gaze tracking in 45h) enables novel research directions in embodied AI and augmented reality. The dataset exposes fundamental limitations in current architectures, particularly in processing hour-long video contexts and synthesizing cross-modal signals—only $23\\%$ of tested models effectively utilized audio-visual synchronization cues. By providing standardized evaluation protocols and curated challenge subsets, EGO4D serves as a universal testbed for developing perceptive systems capable of understanding persistent 3D environments and complex human behaviors.", + "bbox": [ + 169, + 666, + 826, + 763 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "E Detailed Algorithm", + "text_level": 1, + "bbox": [ + 171, + 787, + 370, + 803 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "The detailed VSLS algorithm is presented in Algorithm 2.", + "bbox": [ + 171, + 821, + 560, + 835 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "E.1 Algorithm Overview and Core Components", + "text_level": 1, + "bbox": [ + 171, + 856, + 519, + 871 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "The algorithm operates as an adaptive search framework that intelligently explores video content (represented as set $V$) to locate frames matching semantic-logical query requirements $(Q)$. Unlike", + "bbox": [ + 169, + 883, + 823, + 912 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 490, + 935, + 506, + 946 + ], + "page_idx": 16 + }, + { + "type": "code", + "sub_type": "algorithm", + "code_caption": [ + "Algorithm 2: The complete Visual Semantic-Logical Search" + ], + "code_body": "Function SemanticLogicalTemporalSearch(V, Q, K, $\Delta_t$, $\tau$, $\alpha$, $\gamma$):
\n $\\mathcal{O},\\mathcal{R}\\gets$ ParseQuestion(Q); // Extract key/cue objects and relationships \n $P\\leftarrow$ Uniform, $B\\leftarrow |V|,S\\leftarrow \\emptyset ,N_{v}\\leftarrow |V|$ // Initialize distribution and state \nwhile $B > 0$ and $|\\mathcal{O}| > 0$ do \n $k\\gets \\lfloor \\sqrt{B}\\rfloor ,G\\gets$ Grid(Sample $(P,k^2)$ ); // Adaptive grid sampling \n $\\Omega \\gets$ DetectObjects(G); // Detect objects in sampled frames \nforeach $g\\in G$ do \n $C_g\\gets$ CalculateBaseScore( $\\Omega [g])$ ; // Base detection confidence \nforeach $r\\in \\mathcal{R}$ do if r.type $=$ Spatial then $C_g\\gets C_g + \\alpha \\gamma_{\\mathrm{spatial}}\\cdot$ CheckSpatialRelationship(r, $\\Omega [g])$ else if r.type $=$ Temporal then $C_g\\gets C_g + \\alpha \\gamma_{\\mathrm{time}}\\cdot$ CheckTemporalRelationship(r, $\\Omega ,\\Delta_t)$ else if r.type $=$ Causal then $C_g\\gets C_g + \\alpha \\gamma_{\\mathrm{causal}}\\cdot$ CheckCausalRelationship(r, $\\Omega$ ) else if r.type $=$ Attribute then $C_g\\gets C_g + \\alpha \\gamma_{\\mathrm{attr}}\\cdot$ CheckAttributeRelationship(r, $\\Omega [g],\\tau$ UpdateScores(S,g,Cg); // Update global score registry DiffuseScores(S,w); // Temporal context propagation $P\\gets$ NormalizeDistribution(S), $B\\gets B - k^{2}$ // Update sampling distribution foreach $g\\in \\operatorname {TopK}(S,K)$ do if $\\Omega [g]\\cap \\mathcal{O}\\neq \\emptyset$ then $\\begin{array}{rl}{\\mathcal{O}}&{\\leftarrow\\mathcal{O}\\backslash\\Omega[g]}\\end{array}$ // Remove identified key objects \nreturn TopK(S,K); // Return top-K keyframes", + "bbox": [ + 173, + 111, + 823, + 448 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "traditional linear search methods, it employs a probabilistic sampling strategy that dynamically adjusts based on confidence scores from multiple relationship types.", + "bbox": [ + 169, + 488, + 823, + 517 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Initialization Phase The process begins by parsing the input query $Q$ into two fundamental components:", + "bbox": [ + 169, + 537, + 823, + 566 + ], + "page_idx": 17 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- $\\mathcal{O}$ : A set of key objects or entities to identify", + "- $\\mathcal{R}$ : A collection of relationships (spatial, temporal, causal, and attribute) that must be satisfied" + ], + "bbox": [ + 171, + 571, + 803, + 602 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "The algorithm initializes with a uniform probability distribution $(P)$ across all video frames, establishing a budget $(B)$ equivalent to the total number of frames $(|V|)$ , and creating an empty score registry $(S)$ to track confidence values. This approach ensures unbiased initial exploration before evidence-guided refinement.", + "bbox": [ + 169, + 606, + 826, + 662 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Adaptive Sampling Strategy Rather than exhaustively processing every frame, the algorithm employs a square-root scaling sampling strategy where $k = \\lfloor \\sqrt{B} \\rfloor$ determines the sampling density. This provides a mathematical balance between exploration breadth and computational efficiency. 
{ + "type": "text", + "text": "Multi-modal Object Detection The DetectObjects function applies state-of-the-art computer vision techniques to identify objects within each sampled frame. This step leverages deep neural networks pre-trained on diverse visual datasets, enabling recognition of a wide range of entities with their corresponding confidence scores and spatial locations within frames.", + "bbox": [ + 169, + 777, + 825, + 834 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Score Propagation and Distribution Update The DiffuseScores function implements a temporal context propagation mechanism that spreads confidence values to neighboring frames, acknowledging that relevant content likely extends beyond individual frames. This diffusion creates a smoothed confidence landscape that guides subsequent sampling.", + "bbox": [ + 169, + 854, + 823, + 912 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "After each iteration, the algorithm normalizes the accumulated scores to form an updated probability distribution, focusing future sampling on promising regions while maintaining exploration potential in unexamined areas.", + "bbox": [ + 169, + 90, + 823, + 133 + ], + "page_idx": 18 + }, + 
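{ + "type": "text", + "text": "The following sketch (ours, with an illustrative window size and sigma; the Gaussian form matches Eq. (18) given later in Appendix E.4) shows this diffuse-then-normalize update concretely.", + "page_idx": 18 + }, + { + "type": "code", + "sub_type": "python", + "code_caption": [ + "Illustrative sketch (not from the paper): score diffusion and renormalization" + ], + "code_body": "import math\n\n# Illustrative sketch (ours): Gaussian score diffusion over temporal\n# neighbors, then renormalization into a sampling distribution.\n# The window size and sigma are assumptions for illustration.\ndef diffuse_scores(scores, sigma=2.0, window=5):\n    n = len(scores)\n    out = list(scores)\n    for t in range(n):\n        lo, hi = max(0, t - window), min(n, t + window + 1)\n        for k in range(lo, hi):\n            if k != t:\n                w = math.exp(-((k - t) ** 2) / (2 * sigma ** 2))\n                out[t] += w * scores[k]\n    return out\n\ndef normalize(scores):\n    total = sum(scores)\n    n = len(scores)\n    return [s / total for s in scores] if total > 0 else [1.0 / n] * n\n\n# a single confident frame spreads probability mass to its neighbors\nprobs = normalize(diffuse_scores([0.0, 0.0, 1.0, 0.0, 0.0]))", + "page_idx": 18 + }, + 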
{ + "type": "text", + "text": "Convergence Criteria and Termination The search continues until either:", + "bbox": [ + 171, + 148, + 681, + 162 + ], + "page_idx": 18 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- The sampling budget $(B)$ is exhausted, indicating comprehensive coverage of the video content", + "- All target objects $(\mathcal{O})$ have been successfully identified at satisfactory confidence levels" + ], + "bbox": [ + 171, + 167, + 810, + 196 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "This dual-termination approach balances thoroughness with efficiency, preventing unnecessary computation once objectives are met.", + "bbox": [ + 169, + 202, + 823, + 231 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Result Generation The algorithm concludes by returning the top-K frames with the highest confidence scores, representing the most relevant video segments that satisfy the semantic-logical query requirements. These keyframes provide a concise summary of the content matching the user's information needs.", + "bbox": [ + 169, + 244, + 823, + 301 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "E.2 Implementation Considerations", + "text_level": 1, + "bbox": [ + 171, + 316, + 437, + 333 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "The algorithm's performance depends on several configurable parameters:", + "bbox": [ + 171, + 344, + 658, + 359 + ], + "page_idx": 18 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- $\Delta_{t}$: Temporal window size for relationship analysis", + "- $\tau$: Confidence threshold for attribute matching", + "- $\alpha$: Global relationship influence factor", + "- $\gamma$: Type-specific relationship weights" + ], + "bbox": [ + 171, + 362, + 529, + 422 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "These parameters can be tuned based on application requirements, video characteristics, and computational constraints. The algorithm's modular design allows for straightforward substitution of specific component implementations (e.g., different object detectors or relationship checkers) without altering the overall framework.", + "bbox": [ + 169, + 426, + 826, + 484 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "E.3 Computational Complexity Analysis", + "text_level": 1, + "bbox": [ + 171, + 500, + 470, + 515 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "The time complexity scales with $O(\sqrt{N})$ where $N$ is the total number of frames, significantly improving upon linear approaches. Space complexity remains $O(N)$ to maintain the probability distribution and score registry. The algorithm intelligently balances exploration and exploitation through its adaptive sampling approach, making it particularly suitable for large-scale video analysis tasks where exhaustive processing would be prohibitive.", + "bbox": [ + 169, + 526, + 823, + 597 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "E.4 Technical Implementation Details", + "text_level": 1, + "bbox": [ + 171, + 613, + 450, + 628 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Object Detection and Feature Extraction To achieve real-time performance, the object detection module utilizes pre-trained deep convolutional neural network architectures, particularly variants based on Fast R-CNN and the YOLO series. The system employs a two-stage detection strategy, sketched in code after the feature-extraction details below:", + "bbox": [ + 169, + 638, + 823, + 681 + ], + "page_idx": 18 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Preliminary Detection: Using lightweight models to rapidly identify potential regions;", + "- Fine-grained Classification: Applying more sophisticated models for detailed classification on high-confidence regions." + ], + "bbox": [ + 171, + 686, + 823, + 729 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "The feature extraction process leverages self-attention mechanisms from Vision Transformers (ViT), generating rich semantic embeddings robust to various visual variations such as scale, rotation, and illumination. Each identified object is associated with a feature vector $f_{i} \in \mathbb{R}^{d}$, where $d = 512$ represents the dimensionality of the embedding space.", + "bbox": [ + 169, + 734, + 826, + 791 + ], + "page_idx": 18 + }, + 
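{ + "type": "text", + "text": "A minimal sketch of the two-stage detection cascade referenced above follows; fast_detect and fine_classify are hypothetical stand-ins for the lightweight and fine-grained models, and the confidence gate is an illustrative choice.", + "page_idx": 18 + }, + { + "type": "code", + "sub_type": "python", + "code_caption": [ + "Illustrative sketch (not from the paper): the two-stage detection cascade" + ], + "code_body": "# Illustrative sketch (ours) of the two-stage detection strategy:\n# a cheap detector proposes regions; a heavier classifier refines\n# only the high-confidence ones. Both models are hypothetical stubs.\ndef cascade_detect(frame, fast_detect, fine_classify, conf_gate=0.3):\n    detections = []\n    for region, rough_conf in fast_detect(frame):    # stage 1: proposals\n        if rough_conf < conf_gate:\n            continue                                 # prune cheap misses\n        label, conf = fine_classify(frame, region)   # stage 2: refine\n        detections.append((region, label, conf))\n    return detections", + "page_idx": 18 + }, + 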
Each identified object is associated with a feature vector $f_{i} \\in \\mathbb{R}^{d}$ , where $d = 512$ represents the dimensionality of the embedding space.", + "bbox": [ + 169, + 734, + 826, + 791 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Mathematical Formulations for Relationship Assessment The evaluation of various relationship types is based on precise mathematical definitions:", + "bbox": [ + 169, + 805, + 823, + 834 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Spatial Relationships Given bounding boxes $B_{i} = (x_{i},y_{i},w_{i},h_{i})$ and $B_{j} = (x_{j},y_{j},w_{j},h_{j})$ for two objects, the confidence for a spatial relationship $r_{\\text{spatial}}$ is calculated as:", + "bbox": [ + 169, + 848, + 823, + 878 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\nC_{\\text{spatial}}\\left(B_{i}, B_{j}, r\\right) = \\phi_{r}\\left(B_{i}, B_{j}\\right) \\cdot \\psi\\left(B_{i}\\right) \\cdot \\psi\\left(B_{j}\\right), \\tag{12}\n$$\n", + "text_format": "latex", + "bbox": [ + 331, + 896, + 825, + 914 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "where $\\phi_r$ is a relationship-specific compatibility function and $\\psi$ is the object detection confidence. For example, the compatibility for a \"contains\" relationship is defined as:", + "bbox": [ + 169, + 90, + 826, + 119 + ], + "page_idx": 19 + }, + { + "type": "equation", + "text": "\n$$\n\\phi_{\\text{contains}}\\left(B_{i}, B_{j}\\right) = \\frac{\\operatorname{IoU}\\left(B_{i}, B_{j}\\right)}{\\operatorname{Area}\\left(B_{j}\\right)}. \\tag{13}\n$$\n", + "text_format": "latex", + "bbox": [ + 385, + 138, + 825, + 172 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Temporal Relationships Temporal relationships are calculated by evaluating object behavior patterns across a sequence of frames $\\{F_t, F_{t+1}, \\dots, F_{t+\\Delta_t}\\}$ :", + "bbox": [ + 169, + 186, + 823, + 217 + ], + "page_idx": 19 + }, + { + "type": "equation", + "text": "\n$$\nC_{\\text{temporal}}\\left(O_{i}, O_{j}, r, \\Delta_{t}\\right) = \\prod_{k=0}^{\\Delta_{t}-1} T_{r}\\left(O_{i}^{t+k}, O_{j}^{t+k+1}\\right), \\tag{14}\n$$\n", + "text_format": "latex", + "bbox": [ + 325, + 234, + 825, + 277 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "where $T_{r}$ is a relationship-specific temporal transition matrix and $O_{i}^{t}$ represents the state of object $i$ at time $t$ .", + "bbox": [ + 169, + 290, + 823, + 319 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Causal Relationships Causal relationships utilize a Bayesian network framework to compute conditional probabilities:", + "bbox": [ + 169, + 335, + 823, + 364 + ], + "page_idx": 19 + }, + { + "type": "equation", + "text": "\n$$\nC_{\\text{causal}}\\left(E_{i}, E_{j}\\right) = P\\left(E_{j} \\mid E_{i}\\right) \\cdot \\log \\frac{P\\left(E_{j} \\mid E_{i}\\right)}{P\\left(E_{j}\\right)}, \\tag{15}\n$$\n", + "text_format": "latex", + "bbox": [ + 346, + 383, + 825, + 417 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "where $E_{i}$ and $E_{j}$ represent the presumed cause event and effect event, respectively.", + "bbox": [ + 169, + 429, + 718, + 445 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Attribute Relationships Attribute evaluation
employs cosine similarity metrics between feature vectors and attribute prototypes:", + "bbox": [ + 169, + 460, + 823, + 489 + ], + "page_idx": 19 + }, + { + "type": "equation", + "text": "\n$$\nC_{\\text{attr}}\\left(O_{i}, a\\right) = \\max\\left(0, \\cos\\left(f_{i}, p_{a}\\right) - \\tau\\right), \\tag{16}\n$$\n", + "text_format": "latex", + "bbox": [ + 364, + 511, + 825, + 527 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "where $p_a$ is the prototype vector for attribute $a$ and $\\tau$ is the minimum similarity threshold.", + "bbox": [ + 169, + 540, + 761, + 555 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Score Propagation Algorithm Temporal score propagation is implemented through a weighted diffusion process, analogous to heat diffusion on a graph structure:", + "bbox": [ + 169, + 571, + 823, + 599 + ], + "page_idx": 19 + }, + { + "type": "equation", + "text": "\n$$\nS^{\\prime}(t) = S(t) + \\sum_{k \\in \\mathcal{N}(t)} w_{k,t} \\cdot S(k), \\tag{17}\n$$\n", + "text_format": "latex", + "bbox": [ + 380, + 618, + 825, + 652 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "where $\\mathcal{N}(t)$ represents the temporal neighborhood of frame $t$ , and $w_{k,t}$ is a weight based on temporal distance, defined as:", + "bbox": [ + 169, + 666, + 823, + 695 + ], + "page_idx": 19 + }, + { + "type": "equation", + "text": "\n$$\nw_{k,t} = \\exp\\left(-\\frac{\\left|k - t\\right|^{2}}{2\\sigma^{2}}\\right), \\tag{18}\n$$\n", + "text_format": "latex", + "bbox": [ + 406, + 713, + 825, + 750 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "where $\\sigma$ controls the diffusion range.", + "bbox": [ + 169, + 760, + 416, + 775 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Adaptive Sampling Optimization The sampling strategy is further improved through a dynamically adjusted Thompson sampling method, modeling the probability distribution $P$ as a Beta distribution with shape parameters updated through previous observations:", + "bbox": [ + 169, + 791, + 823, + 834 + ], + "page_idx": 19 + }, + { + "type": "equation", + "text": "\n$$\nP(t) \\sim \\operatorname{Beta}\\left(\\alpha_{t} + \\sum_{i} S_{i}(t), \\beta_{t} + n - \\sum_{i} S_{i}(t)\\right), \\tag{19}\n$$\n", + "text_format": "latex", + "bbox": [ + 330, + 852, + 825, + 883 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "where $\\alpha_{t}$ and $\\beta_{t}$ are prior hyperparameters and $n$ is the total number of observations.", + "bbox": [ + 169, + 897, + 728, + 912 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "E.5 Practical Application Examples", + "text_level": 1, + "bbox": [ + 171, + 90, + 436, + 107 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "In practical visual search scenarios, the algorithm processes complex queries such as \"a person wearing a blue shirt sits down at a table and then picks up a coffee cup\":", + "bbox": [ + 169, + 119, + 823, + 148 + ], + "page_idx": 20 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Query parsing identifies key objects (person, shirt, table, coffee cup) and relationships (blue attribute, sitting action, temporal before-after relation, spatial proximity);", + "- Adaptive sampling selects representative frames from the video;", + "- Multi-relationship evaluation integrates various sources of evidence;", +
"- Score propagation establishes a unified confidence landscape across related frame sets;", + "- Result generation provides a concise summary of the most relevant segments in the video." + ], + "bbox": [ + 171, + 152, + 825, + 255 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "This semantic-logical-temporal search framework represents a significant advancement in multimodal content retrieval, enabling natural language queries that incorporate complex relationships across objects, time, and causal chains.", + "bbox": [ + 169, + 258, + 823, + 301 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "E.6 System Specifications for Reproductivity", + "text_level": 1, + "bbox": [ + 171, + 325, + 498, + 340 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Our experiments were conducted on high-performance servers, each equipped with either an Intel(R) Xeon(R) Platinum 8378A CPU @ 3.00GHz or an Intel(R) Xeon(R) Platinum 8358P CPU @ 2.60GHz, 1TB of RAM, and 4/6 NVIDIA A800 GPUs with 80GB memory. Machines with 4 GPUs are configured with the SXM4 version, while those with 6 GPUs use the PCIe version. The software environment included Python 3.11, PyTorch 2.4, and NCCL 2.21.5 for reactivity.", + "bbox": [ + 169, + 353, + 826, + 424 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "F Case Study of VSLS Keyframe Selection", + "text_level": 1, + "bbox": [ + 171, + 450, + 537, + 468 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/94d01838d210618c918d4b0d5acc496b6d56923e612c4a95d392bb84b400ab9a.jpg", + "image_caption": [ + "Figure 6: Qualitative comparison of frame selection strategies demonstrates VSLS's ability to pinpoint query-critical moments (e.g., the subject presenting pink objects) with temporal precision, while baseline approaches exhibit color misinterpretation (brown) due to suboptimal frame choices. VSLS maintains superior temporal diversity and content relevance, effectively avoiding the redundant selections observed in comparative methods." + ], + "image_footnote": [], + "bbox": [ + 240, + 502, + 754, + 720 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "As shown in Figure 6, the VSLS framework demonstrates its effectiveness through a video question-answering case study involving temporal handwriting analysis. 
The experiment focuses on distinguishing between two sequential events: a brown pen writing \"guitar\" at 2 seconds and a pink pen rewriting the same word at 3 seconds, with the query requiring identification of the second occurrence's pen color.", + "bbox": [ + 169, + 821, + 826, + 891 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "VSLS's analytical process unfolds through three interpretable phases:", + "bbox": [ + 171, + 897, + 624, + 912 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "21", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 20 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Semantic Logic Extraction: Identifies core visual entities (handwritten text, pen, paper) and constructs temporal relationships through triplet formulation: (text, time, pen), establishing the framework for tracking writing instrument changes;", + "- Temporal Relevance Scoring: The gray relevance curve reveals precise temporal localization, with peak scores aligning perfectly with ground truth positions at 2s and 3s, contrasting sharply with baseline methods' random fluctuations;", + "- Search Pattern Visualization: Demonstrates VSLS's focused inspection near critical moments versus uniform sampling's scattered temporal coverage, explaining the baseline's failure to detect the pink pen." + ], + "bbox": [ + 171, + 90, + 825, + 219 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "This case study yields two critical insights about VSLS's temporal reasoning:", + "bbox": [ + 171, + 223, + 676, + 238 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "- Sequential Event Disambiguation: The system successfully differentiates between near-identical visual events through:", + "bbox": [ + 171, + 242, + 823, + 270 + ], + "page_idx": 21 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- First writing instance: Brown pen detection (false positive);", + "- Second writing instance: Pink pen detection (true positive).", + "- Explanation of answer generation disparity: VSLS produces the correct answer (\"Pink\"), whereas the uniform sampling baseline produces an erroneous one (\"Brown\") due to its temporal reasoning failures." + ], + "bbox": [ + 171, + 271, + 823, + 329 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "The spatial-temporal alignment between relevance peaks and ground truth positions confirms VSLS's unique capacity to synchronize semantic logic with visual evidence flow. This case particularly highlights the method's superiority in scenarios requiring precise discrimination of recurrent events with subtle visual variations.", + "bbox": [ + 169, + 333, + 825, + 388 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "G Iteration Analysis", + "text_level": 1, + "bbox": [ + 171, + 409, + 361, + 426 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/a12bb641ca82972e0549859e62e742682b2319ef656a04a9604927b07e64f4ec.jpg", + "image_caption": [ + "Figure 7: The comparative visualization of iteration counts on the medium-length video subset of the VIDEO-MME dataset demonstrates that our method consistently requires a higher number of iterations compared to the T* approach." + ], + "image_footnote": [], + "bbox": [ + 344, + 446, + 656, + 568 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "As shown in Figure 7, incorporating relations into the search algorithm increases the average number of iterations on medium-length videos in the Video-MME dataset from 15.9 to 23.8.
The overall distribution of iteration counts across videos does not change significantly.", + "bbox": [ + 169, + 632, + 825, + 676 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "22", + "bbox": [ + 488, + 935, + 509, + 946 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "H Prompt", + "text_level": 1, + "bbox": [ + 171, + 89, + 277, + 107 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "H.1 Prompt Template for Query Grounding", + "text_level": 1, + "bbox": [ + 171, + 117, + 495, + 133 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Here is the prompt we used for query grounding.", + "bbox": [ + 171, + 138, + 495, + 156 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Prompt Template for Query Grounding", + "text_level": 1, + "bbox": [ + 197, + 164, + 457, + 181 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Analyze the following video frames and the question:", + "bbox": [ + 197, + 191, + 550, + 207 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Question: ", + "bbox": [ + 197, + 207, + 356, + 220 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Options: <options>", + "bbox": [ + 197, + 220, + 339, + 233 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Step 1: Key Object Identification", + "bbox": [ + 197, + 234, + 419, + 247 + ], + "page_idx": 22 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Extract 3-5 core objects detectable by computer vision", + "- Use YOLO-compatible noun phrases (e.g., \"person\", \"mic\")", + "- Format: Key Objects: obj1, obj2, obj3" + ], + "bbox": [ + 215, + 247, + 620, + 290 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Step 2: Contextual Cues", + "bbox": [ + 197, + 290, + 362, + 301 + ], + "page_idx": 22 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- List 2-4 scene elements that help locate key objects based on options provided", + "- Use detectable items (avoid abstract concepts)", + "- Format: Cue Objects: cue1, cue2, cue3" + ], + "bbox": [ + 215, + 303, + 736, + 344 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Step 3: Relationship Triplets", + "bbox": [ + 197, + 344, + 390, + 358 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "- Relationship types:", + "bbox": [ + 215, + 359, + 352, + 372 + ], + "page_idx": 22 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Spatial: Objects must appear in the same frame", + "- Attribute: Color/size/material descriptions (e.g., \"red clothes\", \"large\")", + "- Time: Appear in different frames within a few seconds", + "- Causal: There is a temporal order between the objects" + ], + "bbox": [ + 232, + 373, + 705, + 426 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "- Format of Relations: (object, relation_type, object), relation_type should be exactly one of spatial/attribute/time/causal", + "bbox": [ + 197, + 426, + 797, + 454 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Output Rules", + "text_level": 1, + "bbox": [ + 197, + 455, + 295, + 468 + ], + "page_idx": 22 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. One line each for Key Objects/Cue Objects/Rel starting with exact prefixes", + "2. Separate items with comma except for triplets where items are separated by semicolon", + "3. Never use markdown or natural language explanations", + "4.
If you cannot identify any key objects or cue objects from the video provided, please just identify the possible key or cue objects from the question and options provided" + ], + "bbox": [ + 199, + 468, + 797, + 537 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Below is an example of the procedure:", + "text_level": 1, + "bbox": [ + 197, + 537, + 465, + 551 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Question: For \"When does the person in red clothes appear with the dog?\"", + "bbox": [ + 215, + 551, + 702, + 565 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Response:", + "bbox": [ + 215, + 565, + 284, + 578 + ], + "page_idx": 22 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "Key Objects: person, dog, red clothes", + "Cue Objects: grassy_area, leash, fence", + "Rel: (person; attribute; red clothes), (person; spatial; dog)" + ], + "bbox": [ + 232, + 579, + 733, + 619 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Format your response EXACTLY like this in three lines:", + "text_level": 1, + "bbox": [ + 197, + 619, + 591, + 633 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Key Objects: object1, object2, object3", + "bbox": [ + 232, + 633, + 529, + 648 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Cue Objects: object1, object2, object3", + "bbox": [ + 232, + 648, + 527, + 661 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Rel: (object1; relation_type1; object2), (object3; relation_type2; object4)", + "bbox": [ + 199, + 662, + 797, + 689 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "H.2 Prompt Template for Question Answering", + "text_level": 1, + "bbox": [ + 171, + 717, + 511, + 734 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Here is the prompt we used for question answering.", + "bbox": [ + 171, + 739, + 511, + 753 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Prompt Template for Question Answering", + "text_level": 1, + "bbox": [ + 197, + 763, + 475, + 780 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Select the best answer to the following multiple-choice question based on the video.", + "bbox": [ + 197, + 791, + 750, + 806 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "", + "bbox": [ + 197, + 806, + 259, + 819 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "", + "bbox": [ + 197, + 821, + 259, + 834 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": ".", + "bbox": [ + 197, + 837, + 220, + 843 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Question: ", + "bbox": [ + 197, + 847, + 354, + 859 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Options: <options>", + "bbox": [ + 197, + 861, + 339, + 875 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Answer with the option's letter from the given choices directly.", + "bbox": [ + 197, + 875, + 611, + 888 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Your response format should be strictly an upper case letter A, B, C, D, or E.", + "bbox": [ + 197, + 888, + 683, + 902 + ], + "page_idx": 22 + }, + { + "type": "page_number", + "text": "23", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "I Limitations", + "text_level": 1, + "bbox": [ + 171, + 89, + 302, + 104 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Despite the promising results of our VSLS framework, we acknowledge several limitations: First, although our approach reduces the
required frame sampling to just $1.4\\%$ , the computational complexity remains a consideration for extremely long videos, with a search overhead of approximately 7.8 seconds. This may present challenges for real-time or low-latency applications. Second, the performance of VSLS is bounded by the capabilities of the underlying object detector (YOLO-World). Detection accuracy may degrade under challenging visual conditions such as poor lighting, occlusion, or unusual camera angles, potentially affecting temporal coverage.", + "bbox": [ + 169, + 119, + 826, + 219 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "J Broader Impacts", + "text_level": 1, + "bbox": [ + 171, + 237, + 349, + 255 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Our Visual Semantic-Logical Search (VSLS) framework primarily offers positive societal impacts as a foundational algorithm for efficient keyframe selection in long videos.", + "bbox": [ + 169, + 268, + 823, + 297 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "J.1 Positive Impacts", + "text_level": 1, + "bbox": [ + 171, + 311, + 328, + 328 + ], + "page_idx": 23 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Educational Applications: VSLS enables students and educators to quickly locate relevant segments in instructional videos, improving learning efficiency for visual content.", + "- Research Enhancement: Scientists across disciplines can benefit from more efficient analysis of video archives, particularly those studying behavioral patterns or analyzing historical footage.", + "- Computational Efficiency: By sampling only $1.4\\%$ of frames on average, our approach reduces computational requirements and energy consumption, contributing to more sustainable AI applications.", + "- Accessibility: Our framework can be integrated into assistive technologies for individuals with cognitive processing challenges, helping them identify and focus on critical moments in video content." + ], + "bbox": [ + 173, + 333, + 825, + 473 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "J.2 Potential Considerations", + "text_level": 1, + "bbox": [ + 171, + 492, + 383, + 507 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "As a foundational algorithm, VSLS has limited direct negative impacts. However, like any computer vision technology, applications built upon it should be mindful of general considerations:", + "bbox": [ + 169, + 517, + 823, + 546 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "- Underlying Model Biases: The performance of VSLS depends partly on object detection systems (e.g., YOLO-World), so it inherits any limitations or biases present in these components. Our modular design allows for substitution with improved detection systems as they become available.", + "bbox": [ + 171, + 550, + 826, + 594 + ], + "page_idx": 23 + }, + { + "type": "page_number", + "text": "24", + "bbox": [ + 488, + 934, + 508, + 946 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "NeurIPS Paper Checklist", + "text_level": 1, + "bbox": [ + 171, + 89, + 388, + 107 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "1.
Claims", + "text_level": 1, + "bbox": [ + 210, + 116, + 285, + 130 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Question: Do the main claims made in the abstract and introduction accurately reflect the paper's contributions and scope?", + "bbox": [ + 228, + 135, + 823, + 164 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Answer: [Yes]", + "bbox": [ + 230, + 169, + 330, + 183 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Justification: The abstract and introduction clearly state the main contributions of our work, including (1) the proposal of a semantics-driven keyframe search framework using four logical relations, (2) performance gains on multiple long video QA benchmarks, (3) efficient frame sampling $(1.4\\%)$ with state-of-the-art results, and (4) plug-and-play compatibility with VLM/LLM pipelines. These claims are supported by both the method and experimental sections (see Sections \"Introduction\", \"Method\", and \"Experiment\"), and limitations are discussed in the main paper and Appendix I. The claims are fully aligned with the presented theoretical and empirical results.", + "bbox": [ + 228, + 186, + 826, + 299 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Guidelines:", + "text_level": 1, + "bbox": [ + 230, + 303, + 308, + 316 + ], + "page_idx": 24 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- The answer NA means that the abstract and introduction do not include the claims made in the paper.", + "- The abstract and/or introduction should clearly state the claims made, including the contributions made in the paper and important assumptions and limitations. A No or NA answer to this question will not be perceived well by the reviewers.", + "- The claims made should match theoretical and experimental results, and reflect how much the results can be expected to generalize to other settings.", + "- It is fine to include aspirational goals as motivation as long as it is clear that these goals are not attained by the paper." + ], + "bbox": [ + 230, + 319, + 823, + 446 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "2. Limitations", + "text_level": 1, + "bbox": [ + 210, + 452, + 316, + 465 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Question: Does the paper discuss the limitations of the work performed by the authors?", + "bbox": [ + 228, + 470, + 803, + 486 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Answer: [Yes]", + "bbox": [ + 230, + 489, + 330, + 503 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Justification: The paper discusses limitations in Appendix I.", + "bbox": [ + 228, + 508, + 624, + 523 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Guidelines:", + "text_level": 1, + "bbox": [ + 230, + 529, + 308, + 542 + ], + "page_idx": 24 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- The answer NA means that the paper has no limitation while the answer No means that the paper has limitations, but those are not discussed in the paper.", + "- The authors are encouraged to create a separate \"Limitations\" section in their paper.", + "- The paper should point out any strong assumptions and how robust the results are to violations of these assumptions (e.g., independence assumptions, noiseless settings, model well-specification, asymptotic approximations only holding locally). 
The authors should reflect on how these assumptions might be violated in practice and what the implications would be.", + "- The authors should reflect on the scope of the claims made, e.g., if the approach was only tested on a few datasets or with a few runs. In general, empirical results often depend on implicit assumptions, which should be articulated.", + "- The authors should reflect on the factors that influence the performance of the approach. For example, a facial recognition algorithm may perform poorly when image resolution is low or images are taken in low lighting. Or a speech-to-text system might not be used reliably to provide closed captions for online lectures because it fails to handle technical jargon.", + "- The authors should discuss the computational efficiency of the proposed algorithms and how they scale with dataset size.", + "- If applicable, the authors should discuss possible limitations of their approach to address problems of privacy and fairness.", + "- While the authors might fear that complete honesty about limitations might be used by reviewers as grounds for rejection, a worse outcome might be that reviewers discover limitations that aren't acknowledged in the paper. The authors should use their best judgment and recognize that individual actions in favor of transparency play an important role in developing norms that preserve the integrity of the community. Reviewers will be specifically instructed to not penalize honesty concerning limitations." + ], + "bbox": [ + 230, + 544, + 823, + 912 + ], + "page_idx": 24 + }, + { + "type": "page_number", + "text": "25", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "3. Theory assumptions and proofs", + "text_level": 1, + "bbox": [ + 209, + 90, + 455, + 106 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Question: For each theoretical result, does the paper provide the full set of assumptions and a complete (and correct) proof?", + "bbox": [ + 228, + 111, + 823, + 140 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Answer: [NA]", + "bbox": [ + 228, + 145, + 330, + 159 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Justification: The paper does not include formal theoretical results, theorems, or proofs. Our work is primarily methodological and experimental; all mathematical formulations are used to describe the algorithm and its components, but no formal theorems are claimed or proved. Therefore, this item is not applicable.", + "bbox": [ + 228, + 165, + 826, + 222 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Guidelines:", + "text_level": 1, + "bbox": [ + 228, + 227, + 308, + 239 + ], + "page_idx": 25 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- The answer NA means that the paper does not include theoretical results.", + "- All the theorems, formulas, and proofs in the paper should be numbered and cross-referenced.", + "- All assumptions should be clearly stated or referenced in the statement of any theorems.", + "- The proofs can either appear in the main paper or the supplemental material, but if they appear in the supplemental material, the authors are encouraged to provide a short proof sketch to provide intuition.", + "- Inversely, any informal proof provided in the core of the paper should be complemented by formal proofs provided in appendix or supplemental material.", + "- Theorems and Lemmas that the proof relies upon should be properly referenced." 
+ ], + "bbox": [ + 230, + 243, + 825, + 387 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "4. Experimental result reproducibility", + "text_level": 1, + "bbox": [ + 209, + 393, + 483, + 407 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Question: Does the paper fully disclose all the information needed to reproduce the main experimental results of the paper to the extent that it affects the main claims and/or conclusions of the paper (regardless of whether the code and data are provided or not)?", + "bbox": [ + 227, + 412, + 826, + 455 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Answer: [Yes]", + "bbox": [ + 228, + 460, + 330, + 474 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Justification: The paper provides comprehensive details required for reproducibility, including descriptions of all datasets used (see Section \"Details of Datasets\" and Appendix D), implementation details of the proposed algorithm (see \"Method\" and \"Algorithm Overview\"), hyperparameter choices, prompt templates (Appendix \"Prompt\"), and evaluation protocols for each experiment. We also specify the object detection models and baselines used, and state that the code will be publicly released. This level of detail allows other researchers to replicate the main experiments and validate our claims.", + "bbox": [ + 227, + 479, + 826, + 579 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Guidelines:", + "text_level": 1, + "bbox": [ + 228, + 584, + 310, + 597 + ], + "page_idx": 25 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- The answer NA means that the paper does not include experiments.", + "- If the paper includes experiments, a No answer to this question will not be perceived well by the reviewers: Making the paper reproducible is important, regardless of whether the code and data are provided or not.", + "- If the contribution is a dataset and/or model, the authors should describe the steps taken to make their results reproducible or verifiable." + ], + "bbox": [ + 230, + 599, + 823, + 685 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "- Depending on the contribution, reproducibility can be accomplished in various ways. For example, if the contribution is a novel architecture, describing the architecture fully might suffice, or if the contribution is a specific model and empirical evaluation, it may be necessary to either make it possible for others to replicate the model with the same dataset, or provide access to the model. In general, releasing code and data is often one good way to accomplish this, but reproducibility can also be provided via detailed instructions for how to replicate the results, access to a hosted model (e.g., in the case of a large language model), releasing of a model checkpoint, or other means that are appropriate to the research performed.", + "bbox": [ + 230, + 686, + 825, + 810 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "- While NeurIPS does not require releasing code, the conference does require all submissions to provide some reasonable avenue for reproducibility, which may depend on the nature of the contribution. 
For example", + "bbox": [ + 230, + 811, + 825, + 852 + ], + "page_idx": 25 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "(a) If the contribution is primarily a new algorithm, the paper should make it clear how to reproduce that algorithm.", + "(b) If the contribution is primarily a new model architecture, the paper should describe the architecture clearly and fully." + ], + "bbox": [ + 241, + 854, + 823, + 911 + ], + "page_idx": 25 + }, + { + "type": "page_number", + "text": "26", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 25 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "(c) If the contribution is a new model (e.g., a large language model), then there should either be a way to access this model for reproducing the results or a way to reproduce the model (e.g., with an open-source dataset or instructions for how to construct the dataset).", + "(d) We recognize that reproducibility may be tricky in some cases, in which case authors are welcome to describe the particular way they provide for reproducibility. In the case of closed-source models, it may be that access to the model is limited in some way (e.g., to registered users), but it should be possible for other researchers to have some path to reproducing or verifying the results." + ], + "bbox": [ + 243, + 90, + 823, + 218 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "5. Open access to data and code", + "text_level": 1, + "bbox": [ + 209, + 220, + 439, + 234 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Question: Does the paper provide open access to the data and code, with sufficient instructions to faithfully reproduce the main experimental results, as described in supplemental material?", + "bbox": [ + 228, + 239, + 826, + 281 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Answer: [Yes]", + "text_level": 1, + "bbox": [ + 228, + 286, + 330, + 300 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Justification: We state in the abstract and main text that the code will be publicly released. All datasets used in our experiments are from public benchmarks (LONGVIDEOBENCH,VIDEO-MME, HAYSTACK-LVBENCH, EGO4D), and details for data access are provided in Appendix D. Instructions for running our framework, data preparation, and experiment replication will be included in the released code repository. Thus, researchers will be able to access both code and data with clear instructions for full reproducibility.", + "bbox": [ + 228, + 304, + 826, + 388 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Guidelines:", + "text_level": 1, + "bbox": [ + 228, + 392, + 310, + 405 + ], + "page_idx": 26 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- The answer NA means that paper does not include experiments requiring code.", + "- Please see the NeurIPS code and data submission guidelines (https://nips.cc/public/guides/CodeSubmissionPolicy) for more details.", + "- While we encourage the release of code and data, we understand that this might not be possible, so \"No\" is an acceptable answer. Papers cannot be rejected simply for not including code, unless this is central to the contribution (e.g., for a new open-source benchmark).", + "- The instructions should contain the exact command and environment needed to run to reproduce the results. 
See the NeurIPS code and data submission guidelines (https://nips.cc/public/guides/CodeSubmissionPolicy) for more details.", + "- The authors should provide instructions on data access and preparation, including how to access the raw data, preprocessed data, intermediate data, and generated data, etc.", + "- The authors should provide scripts to reproduce all experimental results for the new proposed method and baselines. If only a subset of experiments are reproducible, they should state which ones are omitted from the script and why.", + "- At submission time, to preserve anonymity, the authors should release anonymized versions (if applicable).", + "- Providing as much information as possible in supplemental material (appended to the paper) is recommended, but including URLs to data and code is permitted." + ], + "bbox": [ + 230, + 407, + 823, + 679 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "6. Experimental setting/details", + "text_level": 1, + "bbox": [ + 209, + 681, + 431, + 696 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Question: Does the paper specify all the training and test details (e.g., data splits, hyperparameters, how they were chosen, type of optimizer, etc.) necessary to understand the results?", + "bbox": [ + 228, + 700, + 826, + 742 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Answer: [Yes]", + "text_level": 1, + "bbox": [ + 228, + 747, + 330, + 762 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Justification: The paper specifies all relevant experimental details, including descriptions of dataset splits, hyperparameters, evaluation metrics, and prompt templates (see \"Experiment,\" Table captions, and Appendix D). As our method is training-free, we clarify in the main text which components rely on pre-trained models and explicitly describe all parameter settings for reproducibility. This ensures that readers can fully understand and interpret the reported results.", + "bbox": [ + 228, + 765, + 826, + 848 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Guidelines:", + "text_level": 1, + "bbox": [ + 228, + 853, + 310, + 866 + ], + "page_idx": 26 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- The answer NA means that the paper does not include experiments.", + "- The experimental setting should be presented in the core of the paper to a level of detail that is necessary to appreciate the results and make sense of them." + ], + "bbox": [ + 230, + 868, + 823, + 912 + ], + "page_idx": 26 + }, + { + "type": "page_number", + "text": "27", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "- The full details can be provided either with the code, in appendix, or as supplemental material.", + "bbox": [ + 230, + 90, + 823, + 118 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "7. 
Experiment statistical significance", + "text_level": 1, + "bbox": [ + 210, + 125, + 472, + 140 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Question: Does the paper report error bars suitably and correctly defined or other appropriate information about the statistical significance of the experiments?", + "bbox": [ + 228, + 145, + 823, + 174 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Answer: [No]", + "text_level": 1, + "bbox": [ + 228, + 180, + 326, + 194 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Justification: The paper does not report error bars or formal statistical significance tests for the main experimental results, as our approach is deterministic and uses fixed dataset splits and pre-trained models. Metrics are reported as single values following common practice in recent long video QA benchmarks. While this is standard in the area, we acknowledge that including error bars or additional significance analysis would further strengthen the experimental evaluation.", + "bbox": [ + 228, + 199, + 823, + 282 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Guidelines:", + "text_level": 1, + "bbox": [ + 228, + 289, + 310, + 301 + ], + "page_idx": 27 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- The answer NA means that the paper does not include experiments.", + "- The authors should answer \"Yes\" if the results are accompanied by error bars, confidence intervals, or statistical significance tests, at least for the experiments that support the main claims of the paper.", + "- The factors of variability that the error bars are capturing should be clearly stated (for example, train/test split, initialization, random drawing of some parameter, or overall run with given experimental conditions).", + "- The method for calculating the error bars should be explained (closed form formula, call to a library function, bootstrap, etc.)", + "- The assumptions made should be given (e.g., Normally distributed errors).", + "- It should be clear whether the error bar is the standard deviation or the standard error of the mean.", + "- It is OK to report 1-sigma error bars, but one should state it. The authors should preferably report a 2-sigma error bar than state that they have a $96\\%$ CI, if the hypothesis of Normality of errors is not verified.", + "- For asymmetric distributions, the authors should be careful not to show in tables or figures symmetric error bars that would yield results that are out of range (e.g. negative error rates).", + "- If error bars are reported in tables or plots, The authors should explain in the text how they were calculated and reference the corresponding figures or tables in the text." + ], + "bbox": [ + 230, + 305, + 823, + 590 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "8. 
Experiments compute resources", + "text_level": 1, + "bbox": [ + 210, + 597, + 457, + 612 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Question: For each experiment, does the paper provide sufficient information on the computer resources (type of compute workers, memory, time of execution) needed to reproduce the experiments?", + "bbox": [ + 228, + 617, + 825, + 659 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Answer: [Yes]", + "text_level": 1, + "bbox": [ + 228, + 665, + 330, + 680 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Justification: The paper specifies the computing environment in Appendix E.6, and reports both latency and FLOPs for major baselines and our method in Table 1. We also provide the number of iterations, average processing time, and model sizes in the main text and tables. This information is sufficient for others to estimate compute requirements and reproduce the experiments.", + "bbox": [ + 228, + 685, + 823, + 755 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Guidelines:", + "text_level": 1, + "bbox": [ + 228, + 760, + 310, + 773 + ], + "page_idx": 27 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- The answer NA means that the paper does not include experiments.", + "- The paper should indicate the type of compute workers CPU or GPU, internal cluster, or cloud provider, including relevant memory and storage.", + "- The paper should provide the amount of compute required for each of the individual experimental runs as well as estimate the total compute.", + "- The paper should disclose whether the full research project required more compute than the experiments reported in the paper (e.g., preliminary or failed experiments that didn't make it into the paper)." + ], + "bbox": [ + 230, + 776, + 823, + 891 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "9. Code of ethics", + "text_level": 1, + "bbox": [ + 210, + 897, + 333, + 910 + ], + "page_idx": 27 + }, + { + "type": "page_number", + "text": "28", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Question: Does the research conducted in the paper conform, in every respect, with the NeurIPS Code of Ethics https://neurips.cc/public/EthicsGuidelines?", + "bbox": [ + 228, + 90, + 823, + 119 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Answer: [Yes]", + "bbox": [ + 228, + 125, + 330, + 138 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Justification: The research follows the NeurIPS Code of Ethics. All datasets used are publicly available, appropriately licensed, and include human annotation with proper privacy safeguards (see Appendix D). No personally identifiable information or sensitive data is used. The proposed methods and experiments present no foreseeable risk of harm, discrimination, or privacy violation. 
Anonymity is preserved in all supplementary materials.", + "bbox": [ + 227, + 143, + 826, + 214 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Guidelines:", + "bbox": [ + 228, + 217, + 308, + 229 + ], + "page_idx": 28 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- The answer NA means that the authors have not reviewed the NeurIPS Code of Ethics.", + "- If the authors answer No, they should explain the special circumstances that require a deviation from the Code of Ethics.", + "- The authors should make sure to preserve anonymity (e.g., if there is a special consideration due to laws or regulations in their jurisdiction)." + ], + "bbox": [ + 230, + 232, + 823, + 304 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "10. Broader impacts", + "text_level": 1, + "bbox": [ + 202, + 308, + 352, + 323 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Question: Does the paper discuss both potential positive societal impacts and negative societal impacts of the work performed?", + "bbox": [ + 228, + 328, + 823, + 357 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Answer: [Yes]", + "bbox": [ + 228, + 359, + 330, + 375 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Justification: Our paper discusses broader impacts in Appendix J.", + "bbox": [ + 228, + 378, + 661, + 393 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Guidelines:", + "bbox": [ + 228, + 398, + 310, + 411 + ], + "page_idx": 28 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- The answer NA means that there is no societal impact of the work performed.", + "- If the authors answer NA or No, they should explain why their work has no societal impact or why the paper does not address societal impact.", + "- Examples of negative societal impacts include potential malicious or unintended uses (e.g., disinformation, generating fake profiles, surveillance), fairness considerations (e.g., deployment of technologies that could make decisions that unfairly impact specific groups), privacy considerations, and security considerations.", + "- The conference expects that many papers will be foundational research and not tied to particular applications, let alone deployments. However, if there is a direct path to any negative applications, the authors should point it out. For example, it is legitimate to point out that an improvement in the quality of generative models could be used to generate deepfakes for disinformation. On the other hand, it is not needed to point out that a generic algorithm for optimizing neural networks could enable people to train models that generate Deepfakes faster.", + "- The authors should consider possible harms that could arise when the technology is being used as intended and functioning correctly, harms that could arise when the technology is being used as intended but gives incorrect results, and harms following from (intentional or unintentional) misuse of the technology.", + "- If there are negative societal impacts, the authors could also discuss possible mitigation strategies (e.g., gated release of models, providing defenses in addition to attacks, mechanisms for monitoring misuse, mechanisms to monitor how a system learns from feedback over time, improving the efficiency and accessibility of ML)." + ], + "bbox": [ + 230, + 414, + 823, + 723 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "11. 
Safeguards", + "text_level": 1, + "bbox": [ + 202, + 728, + 313, + 742 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Question: Does the paper describe safeguards that have been put in place for responsible release of data or models that have a high risk for misuse (e.g., pretrained language models, image generators, or scraped datasets)?", + "bbox": [ + 228, + 747, + 825, + 790 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Answer: [NA]", + "bbox": [ + 228, + 794, + 330, + 808 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Justification: Our work introduces a semantic-logical search framework for keyframe selection that builds upon existing object detection models and benchmarks. It does not release new datasets scraped from the internet or high-risk generative models. While our method improves video understanding capabilities, it doesn't introduce fundamentally new capabilities that would require specific safeguards beyond those already in place for the underlying technologies (such as YOLO-World) that we utilize.", + "bbox": [ + 228, + 811, + 823, + 896 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "- The answer NA means that the paper poses no such risks.", + "bbox": [ + 230, + 897, + 624, + 912 + ], + "page_idx": 28 + }, + { + "type": "page_number", + "text": "29", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 28 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Released models that have a high risk for misuse or dual-use should be released with necessary safeguards to allow for controlled use of the model, for example by requiring that users adhere to usage guidelines or restrictions to access the model or implementing safety filters.", + "- Datasets that have been scraped from the Internet could pose safety risks. The authors should describe how they avoided releasing unsafe images.", + "- We recognize that providing effective safeguards is challenging, and many papers do not require this, but we encourage authors to take this into account and make a best faith effort." + ], + "bbox": [ + 230, + 90, + 823, + 218 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "12. Licenses for existing assets", + "text_level": 1, + "bbox": [ + 202, + 223, + 419, + 239 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Question: Are the creators or original owners of assets (e.g., code, data, models), used in the paper, properly credited and are the license and terms of use explicitly mentioned and properly respected?", + "bbox": [ + 228, + 243, + 823, + 286 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Answer: [NA]", + "bbox": [ + 230, + 290, + 328, + 305 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Justification: Our work introduces a semantic-logical search framework for keyframe selection that builds upon existing object detection models and benchmarks. It does not release new datasets scraped from the internet or high-risk generative models. 
While our method improves video understanding capabilities, it doesn't introduce fundamentally new capabilities that would require specific safeguards beyond those already in place for the underlying technologies (such as YOLO-World) that we utilize.", + "bbox": [ + 228, + 309, + 823, + 393 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Guidelines:", + "text_level": 1, + "bbox": [ + 230, + 398, + 308, + 411 + ], + "page_idx": 29 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- The answer NA means that the paper does not use existing assets.", + "- The authors should cite the original paper that produced the code package or dataset.", + "- The authors should state which version of the asset is used and, if possible, include a URL.", + "- The name of the license (e.g., CC-BY 4.0) should be included for each asset.", + "- For scraped data from a particular source (e.g., website), the copyright and terms of service of that source should be provided.", + "- If assets are released, the license, copyright information, and terms of use in the package should be provided. For popular datasets, paperswithcode.com/datasets has curated licenses for some datasets. Their licensing guide can help determine the license of a dataset.", + "- For existing datasets that are re-packaged, both the original license and the license of the derived asset (if it has changed) should be provided.", + "- If this information is not available online, the authors are encouraged to reach out to the asset's creators." + ], + "bbox": [ + 230, + 414, + 823, + 628 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "13. New assets", + "text_level": 1, + "bbox": [ + 202, + 635, + 310, + 648 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Question: Are new assets introduced in the paper well documented and is the documentation provided alongside the assets?", + "bbox": [ + 228, + 654, + 823, + 684 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Answer: [Yes]", + "bbox": [ + 230, + 688, + 328, + 703 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Justification: We will release code for our VSLS framework upon publication, as mentioned in the abstract. The code will be accompanied by comprehensive documentation detailing the implementation of our four logical dependencies (spatial, temporal, attribute, and causal), the iterative refinement process, and instructions for reproducing our experimental results. Our paper does not introduce new datasets but rather evaluates our method on existing benchmarks including LONGVIDEOBENCH, VIDEO-MME, and HAYSTACK-LVBENCH, which are properly cited throughout the paper.", + "bbox": [ + 228, + 708, + 826, + 806 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Guidelines:", + "text_level": 1, + "bbox": [ + 230, + 810, + 308, + 823 + ], + "page_idx": 29 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- The answer NA means that the paper does not release new assets.", + "- Researchers should communicate the details of the dataset/code/model as part of their submissions via structured templates. This includes details about training, license, limitations, etc.", + "- The paper should discuss whether and how consent was obtained from people whose asset is used." 
+ ], + "bbox": [ + 230, + 825, + 826, + 910 + ], + "page_idx": 29 + }, + { + "type": "page_number", + "text": "30", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "- At submission time, remember to anonymize your assets (if applicable). You can either create an anonymized URL or include an anonymized zip file.", + "bbox": [ + 230, + 90, + 823, + 119 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "14. Crowdsourcing and research with human subjects", + "text_level": 1, + "bbox": [ + 202, + 125, + 584, + 140 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Question: For crowdsourcing experiments and research with human subjects, does the paper include the full text of instructions given to participants and screenshots, if applicable, as well as details about compensation (if any)?", + "bbox": [ + 228, + 143, + 823, + 186 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Answer: [NA]", + "bbox": [ + 230, + 191, + 328, + 205 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Justification: Our research does not involve crowdsourcing or human subject experiments. We evaluate our method using existing benchmarks (LONGVIDEOBENCH,VIDEO-MME, LONGVIDEOBENCH) that contain human-annotated ground truth data, but we did not collect new human annotations or conduct human evaluations as part of our work. Our methodology is purely algorithmic, focusing on the semantic-logical frameworks for keyframe selection and evaluation through computational metrics.", + "bbox": [ + 228, + 210, + 826, + 294 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Guidelines:", + "text_level": 1, + "bbox": [ + 230, + 299, + 308, + 311 + ], + "page_idx": 30 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- The answer NA means that the paper does not involve crowdsourcing nor research with human subjects.", + "- Including this information in the supplemental material is fine, but if the main contribution of the paper involves human subjects, then as much detail as possible should be included in the main paper.", + "- According to the NeurIPS Code of Ethics, workers involved in data collection, curation, or other labor should be paid at least the minimum wage in the country of the data collector." + ], + "bbox": [ + 230, + 314, + 823, + 414 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "15. Institutional review board (IRB) approvals or equivalent for research with human subjects", + "text_level": 1, + "bbox": [ + 202, + 417, + 823, + 446 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Question: Does the paper describe potential risks incurred by study participants, whether such risks were disclosed to the subjects, and whether Institutional Review Board (IRB) approvals (or an equivalent approval/review based on the requirements of your country or institution) were obtained?", + "bbox": [ + 228, + 452, + 825, + 507 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Answer: [NA]", + "bbox": [ + 230, + 513, + 328, + 527 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Justification: Our research does not involve human subjects. We utilize existing benchmark datasets (LONGVIDEOBENCH, VIDEO-MME, HAYSTACK-LVBENCH) without collecting new data from human participants. 
Our work focuses on developing and evaluating algorithmic approaches for keyframe selection based on semantic-logical relationships, which do not require IRB approval or equivalent ethical review processes.", + "bbox": [ + 228, + 532, + 826, + 602 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Guidelines:", + "text_level": 1, + "bbox": [ + 230, + 607, + 308, + 619 + ], + "page_idx": 30 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- The answer NA means that the paper does not involve crowdsourcing nor research with human subjects.", + "- Depending on the country in which research is conducted, IRB approval (or equivalent) may be required for any human subjects research. If you obtained IRB approval, you should clearly state this in the paper.", + "- We recognize that the procedures for this may vary significantly between institutions and locations, and we expect authors to adhere to the NeurIPS Code of Ethics and the guidelines for their institution.", + "- For initial submissions, do not include any information that would break anonymity (if applicable), such as the institution conducting the review." + ], + "bbox": [ + 230, + 622, + 823, + 763 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "16. Declaration of LLM usage", + "text_level": 1, + "bbox": [ + 202, + 768, + 419, + 784 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Question: Does the paper describe the usage of LLMs if it is an important, original, or non-standard component of the core methods in this research? Note that if the LLM is used only for writing, editing, or formatting purposes and does not impact the core methodology, scientific rigorousness, or originality of the research, declaration is not required.", + "bbox": [ + 228, + 789, + 826, + 845 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Answer: [Yes]", + "bbox": [ + 230, + 849, + 328, + 864 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Justification: Our Visual Semantic-Logical Search framework uses LLMs (specifically mentioned in Section 3.2 and Figure 2) as part of our query decomposition process. We employ models such as LLAVA-7B and GPT-4o to extract semantic information from
The prompt template for this query grounding is provided in Appendix H.", + "bbox": [ + 233, + 89, + 828, + 157 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Guidelines:", + "text_level": 1, + "bbox": [ + 230, + 165, + 308, + 178 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "- The answer NA means that the core method development in this research does not involve LLMs as any important, original, or non-standard components.", + "bbox": [ + 230, + 180, + 823, + 208 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "- Please refer to our LLM policy (https://neurips.cc/Conferences/2025/LLM) for what should or should not be described.", + "bbox": [ + 230, + 209, + 823, + 236 + ], + "page_idx": 31 + }, + { + "type": "page_number", + "text": "32", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 31 + } +] \ No newline at end of file diff --git a/data/2025/2503_13xxx/2503.13139/d822d65e-325f-4d59-9422-a1e3da2df18a_model.json b/data/2025/2503_13xxx/2503.13139/d822d65e-325f-4d59-9422-a1e3da2df18a_model.json new file mode 100644 index 0000000000000000000000000000000000000000..bab151c0a1a5f7f557aa9da4cf0aaf429034ce80 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13139/d822d65e-325f-4d59-9422-a1e3da2df18a_model.json @@ -0,0 +1,7579 @@ +[ + [ + { + "type": "title", + "bbox": [ + 0.226, + 0.123, + 0.777, + 0.201 + ], + "angle": 0, + "content": "Logic-in-Frames: Dynamic Keyframe Search via Visual Semantic-Logical Verification for Long Video Understanding" + }, + { + "type": "text", + "bbox": [ + 0.236, + 0.238, + 0.765, + 0.266 + ], + "angle": 0, + "content": "Weiyu Guo Ziyang Chen Shaoguang Wang Jianxiang He Yijie Xu AI Thrust, HKUST(GZ)" + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.268, + 0.766, + 0.282 + ], + "angle": 0, + "content": "{wguo395, zchen483, swang440, jhe307, yxu409}@connect.hkust-gz.edu.cn" + }, + { + "type": "text", + "bbox": [ + 0.345, + 0.284, + 0.414, + 0.297 + ], + "angle": 0, + "content": "Jinhui Ye" + }, + { + "type": "text", + "bbox": [ + 0.299, + 0.299, + 0.46, + 0.312 + ], + "angle": 0, + "content": "Shanghai AI Laboratory" + }, + { + "type": "text", + "bbox": [ + 0.306, + 0.314, + 0.455, + 0.326 + ], + "angle": 0, + "content": "jinhuiyes@gmail.com" + }, + { + "type": "text", + "bbox": [ + 0.52, + 0.284, + 0.691, + 0.298 + ], + "angle": 0, + "content": "Ying Sun* Hui Xiong*" + }, + { + "type": "text", + "bbox": [ + 0.526, + 0.299, + 0.687, + 0.311 + ], + "angle": 0, + "content": "AI Thrust, HKUST(GZ)" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.313, + 0.7, + 0.326 + ], + "angle": 0, + "content": "{yings, xionghui}@ust.hk" + }, + { + "type": "title", + "bbox": [ + 0.46, + 0.336, + 0.538, + 0.352 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.361, + 0.77, + 0.61 + ], + "angle": 0, + "content": "Understanding long video content is a complex endeavor that often relies on densely sampled frame captions or end-to-end feature selectors, yet these techniques commonly overlook the logical relationships between textual queries and visual elements. In practice, computational constraints necessitate coarse frame subsampling, a challenge analogous to \"finding a needle in a haystack.\" To address this issue, we introduce a semantics-driven search framework that reformulates keyframe selection under the paradigm of Visual Semantic-Logical Search. 
Specifically, we systematically define four fundamental logical dependencies: 1) spatial co-occurrence, 2) temporal proximity, 3) attribute dependency, and 4) causal order. These relations dynamically update frame sampling distributions through an iterative refinement process, enabling context-aware identification of semantically critical frames tailored to specific query requirements. Our method establishes new SOTA performance on the manually annotated benchmark in keyframe selection metrics. Furthermore, when applied to downstream video question-answering tasks, the proposed approach demonstrates the best performance gains over existing methods on LONGVIDEOBENCH and VIDEO-MME, validating its effectiveness in bridging the logical gap between textual queries and visual-temporal reasoning. The code will be publicly available." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.619, + 0.314, + 0.635 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.643, + 0.828, + 0.743 + ], + "angle": 0, + "content": "Vision-Language Models (VLMs) Yin et al. (2024) have achieved remarkable progress in video understanding Zou et al. (2024); Tang et al. (2023), particularly in video question answering Wang et al. (2024c); Zhang et al. (2023), demonstrating potential for modeling real-world scenarios. However, existing methods can only simultaneously process a limited number of frames due to the inherent token limit and extremely high dimension of spatio-temporal video data, especially for long videos. Furthermore, uniformly sampled keyframes are query-agnostic and insufficient to represent query-related content. To tackle these challenges, this paper addresses a pivotal research question:" + }, + { + "type": "text", + "bbox": [ + 0.229, + 0.747, + 0.768, + 0.775 + ], + "angle": 0, + "content": "How can we efficiently and accurately select keyframes that are semantically critical for answering video-based queries?" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.781, + 0.828, + 0.88 + ], + "angle": 0, + "content": "We hypothesize that deconstructing visual semantic and logical cues (e.g., target objects, logical relations including temporal, spatial, attribute, and causal relationships between visual entities) from textual queries enables effective identification of task-relevant frames through heuristic sampling and search. Building on this insight, we propose Visual Semantic-Logical Search (VSLS), a novel keyframe search framework that incorporates target object confidence estimation and joint verification of visual semantic logic into the iterative update of frame sampling distribution and selects the most informative frames with the highest confidence. Experimental results show that our" + }, + { + "type": "page_footnote", + "bbox": [ + 0.192, + 0.887, + 0.341, + 0.901 + ], + "angle": 0, + "content": "*Corresponding authors." + }, + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.256, + 0.061, + 0.709 + ], + "angle": 270, + "content": "arXiv:2503.13139v2 [cs.CV] 17 May 2025" + }, + { + "type": "footer", + "bbox": [ + 0.173, + 0.923, + 0.228, + 0.938 + ], + "angle": 0, + "content": "Preprint."
+ } + ], + [ + { + "type": "image_caption", + "bbox": [ + 0.177, + 0.09, + 0.228, + 0.101 + ], + "angle": 0, + "content": "Temporal" + }, + { + "type": "image_caption", + "bbox": [ + 0.385, + 0.091, + 0.454, + 0.1 + ], + "angle": 0, + "content": "(text, time, pen)" + }, + { + "type": "text", + "bbox": [ + 0.195, + 0.102, + 0.478, + 0.112 + ], + "angle": 0, + "content": "Q: In the video, what color pen did the author use when he wrote" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.112, + 0.406, + 0.121 + ], + "angle": 0, + "content": "\\"guitar\\" for the second time?" + }, + { + "type": "image_caption", + "bbox": [ + 0.185, + 0.12, + 0.226, + 0.127 + ], + "angle": 0, + "content": "A) Brown" + }, + { + "type": "image_caption", + "bbox": [ + 0.346, + 0.12, + 0.387, + 0.127 + ], + "angle": 0, + "content": "B) Pink" + }, + { + "type": "image", + "bbox": [ + 0.177, + 0.13, + 0.333, + 0.186 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.345, + 0.13, + 0.498, + 0.186 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.18, + 0.187, + 0.218, + 0.197 + ], + "angle": 0, + "content": "Spatial" + }, + { + "type": "image_caption", + "bbox": [ + 0.346, + 0.187, + 0.493, + 0.197 + ], + "angle": 0, + "content": "(copilot, spatial, Egyptian Pyramids)" + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.2, + 0.496, + 0.21 + ], + "angle": 0, + "content": "Q: At the end of the animation, which building does the airplane fly over?" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.21, + 0.275, + 0.218 + ], + "angle": 0, + "content": "A) The Eiffel Tower." + }, + { + "type": "text", + "bbox": [ + 0.35, + 0.21, + 0.459, + 0.218 + ], + "angle": 0, + "content": "B) The Egyptian Pyramids" + }, + { + "type": "image", + "bbox": [ + 0.177, + 0.224, + 0.334, + 0.334 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.185, + 0.334, + 0.291, + 0.341 + ], + "angle": 0, + "content": "RED—Baseline Answer" + }, + { + "type": "image", + "bbox": [ + 0.342, + 0.224, + 0.499, + 0.335 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.35, + 0.334, + 0.404, + 0.341 + ], + "angle": 0, + "content": "Our Answer" + }, + { + "type": "image_caption", + "bbox": [ + 0.503, + 0.091, + 0.551, + 0.1 + ], + "angle": 0, + "content": "Attribute" + }, + { + "type": "image_caption", + "bbox": [ + 0.683, + 0.091, + 0.803, + 0.099 + ], + "angle": 0, + "content": "(man, attribute, white shirt)" + }, + { + "type": "text", + "bbox": [ + 0.52, + 0.101, + 0.804, + 0.119 + ], + "angle": 0, + "content": "Q: In a room with a wall tiger and a map on the wall, there is a man wearing a white shirt. What is he doing?"
+ }, + { + "type": "text", + "bbox": [ + 0.509, + 0.12, + 0.627, + 0.128 + ], + "angle": 0, + "content": "A) gazing at a circuit board" + }, + { + "type": "image_caption", + "bbox": [ + 0.671, + 0.12, + 0.719, + 0.128 + ], + "angle": 0, + "content": "B) speaking" + }, + { + "type": "image", + "bbox": [ + 0.504, + 0.13, + 0.657, + 0.186 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.66, + 0.13, + 0.82, + 0.187 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.508, + 0.187, + 0.541, + 0.197 + ], + "angle": 0, + "content": "Causal" + }, + { + "type": "image_caption", + "bbox": [ + 0.686, + 0.188, + 0.794, + 0.196 + ], + "angle": 0, + "content": "(man, causal, basketball)" + }, + { + "type": "text", + "bbox": [ + 0.505, + 0.197, + 0.819, + 0.206 + ], + "angle": 0, + "content": "Q:After a man wearing a red short-sleeved shirt and a black hat finished" + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.206, + 0.77, + 0.214 + ], + "angle": 0, + "content": "speaking in front of a black background, what did this me" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.214, + 0.632, + 0.223 + ], + "angle": 0, + "content": "A) picked up a mobile phone." + }, + { + "type": "image", + "bbox": [ + 0.504, + 0.224, + 0.66, + 0.335 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.662, + 0.224, + 0.82, + 0.335 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.348, + 0.827, + 0.404 + ], + "angle": 0, + "content": "Figure 1: Examples of four types of visual semantic-logical relationships in video QA detected by our VSLS framework: Temporal (text, time, pen), Attribute (man, attribute, white shirt), Spatial (copilot, spatial, Egyptian Pyramids), and Causal (man, causal, basketball). Green boxes indicate correct answers, while red boxes show baseline errors." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.425, + 0.825, + 0.468 + ], + "angle": 0, + "content": "approach requires only sparse sampling (1.4% of frames per video on average) to identify critical frames, significantly reducing computational complexity compared to conventional dense sampling strategies while maintaining performance on downstream video understanding tasks." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.473, + 0.827, + 0.585 + ], + "angle": 0, + "content": "Compared to conventional methods, VSLS shows three distinct advantages. First, the framework is training-free and highly efficient in comparison with dense captioning Chen et al. (2024c); Kim et al. (2024); Wang et al. (2024b) or video clustering Wang et al. (2024e); Rajan and Parameswaran (2025) strategies, sampling only \\(1.4\\%\\) of frames on average in LVHAYSTACK. Second, it explicitly models logical binary relations (namely spatial, temporal, attribute, and causal) in the query beyond simple target detection Ye et al. (2025b), utilizing additional visual semantic features and enhancing logical consistency throughout the reasoning process. Third, VSLS is a plug-and-play module, which can be seamlessly integrated into existing VLM pipelines without cross-component dependencies." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.591, + 0.828, + 0.772 + ], + "angle": 0, + "content": "We further examine VSLS on several public datasets, including LONGVIDEOBENCH Ye et al. (2025a), a comprehensive benchmark for long video understanding; VIDEO-MME Fu et al. 
(2024), a widely adopted multimodal video question answering dataset; and HAYSTACK-LVBENCH Ye et al. (2025a) with meticulously annotated keyframes based on human feedback for more precise analysis. Extensive experiments demonstrate significant improvements in both the semantic similarity and temporal coverage between the retrieved keyframes and the ground truth labels, as well as the accuracy in downstream video question-answering tasks. More importantly, with only \(1.4\%\) of video frames (EGO4D Grauman et al. (2022)) sampled in the search iteration, our method achieves an \(8.7\%\) improvement in GPT-4o Hurst et al. (2024)'s long video QA accuracy. This performance gain is attributed to our simple yet powerful observation: query-guided visual semantic logic retrieval can mitigate the gap between potential visual logic in video frames and the logic expressed in the query. To be specific, constructing ternary logic triplets with visual elements (e.g., object1, logic type, object2) can enhance downstream reasoning capabilities when performing textual-visual retrieval." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.777, + 0.825, + 0.818 + ], + "angle": 0, + "content": "To the best of our knowledge, we are arguably the first to search for keyframes in long videos by detecting visual semantic logic, with potential extensions to other textual-visual retrieval tasks. Our main contributions are as follows:" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.824, + 0.826, + 0.851 + ], + "angle": 0, + "content": "- We define four fundamental types of semantic logic relations in video QA tasks, including temporal, causal, attribute, and spatial relations, which can be accurately detected across various datasets." + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.852, + 0.825, + 0.881 + ], + "angle": 0, + "content": "- We sample only \(1.4\%\) of frames on average during keyframe search through heuristic sampling and distribution updating guided by different visual semantic and logical relations." + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.882, + 0.826, + 0.923 + ], + "angle": 0, + "content": "- We comprehensively evaluate retrieval efficiency, semantic similarity, temporal coverage, and video question answering accuracy across several widely used video understanding datasets, demonstrating significant improvements in downstream tasks." + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.824, + 0.826, + 0.923 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.947 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.176, + 0.089, + 0.825, + 0.272 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.273, + 0.825, + 0.342 + ], + "angle": 0, + "content": "Figure 2: Our VSLS Framework for Efficient Keyframe Selection. VSLS sparsely samples frames and selects key ones via object detection and logic verification. Steps: 1) Use LLM & VLM to extract cue/target objects and four logic types (spatial, temporal, attribute, causal); 2) Adaptive sampling with evolving confidence; 3) Detect objects via YOLO-WORLD; 4) Fuse scores with a spline function to identify high-confidence frames for downstream tasks."
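To make step 1 of the caption above concrete, here is a minimal sketch of the (object1, logic type, object2) triplets that query decomposition is described as producing; the variable names and the exact schema are illustrative assumptions, since the paper does not fix an output format here.

```python
# Hypothetical decomposition of the Figure 1 attribute example:
# "In a room with a wall tiger and a map on the wall, there is a man
#  wearing a white shirt. What is he doing?"
key_objects = {"man"}                      # what the question asks about
cue_objects = {"tiger painting", "map"}    # context that localizes the key object
relations = [
    ("man", "attribute", "white shirt"),   # shared visual property
    ("man", "spatial", "tiger painting"),  # expected same-frame co-occurrence
]
```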
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.348, + 0.273, + 0.364 + ], + "angle": 0, + "content": "2 Method" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.37, + 0.827, + 0.453 + ], + "angle": 0, + "content": "Although existing long-context VLM frameworks implement keyframe search for video QA tasks Liang et al. (2024); Park et al. (2024); Tan et al. (2024); Wang et al. (2024a,d); Yu et al. (2024), their computational efficiency and searching accuracy remain suboptimal. To address this needle-in-a-haystack challenge Wang et al. (2025); Zhao et al. (2024), we propose a novel method VSLS that aligns the semantic relations between the text modality and video modality, enhancing the plausibility of logical reasoning and performance of downstream tasks." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.464, + 0.339, + 0.477 + ], + "angle": 0, + "content": "2.1 Task Formulation" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.481, + 0.825, + 0.525 + ], + "angle": 0, + "content": "Given a video sequence \\( V = \\{f_t\\}_{t=1}^{N_v} \\) with \\( N_v \\) frames and a query \\( Q \\), the ideal temporal search framework aims to retrieve the minimal keyframe subset \\( V^K = \\{f_{m_i}\\}_{i=1}^K \\subseteq V \\) with \\( K \\) keyframes that satisfies:" + }, + { + "type": "text", + "bbox": [ + 0.173, + 0.53, + 0.826, + 0.559 + ], + "angle": 0, + "content": "- Conservation: The keyframe subset \\( V^K \\subseteq V \\) must satisfy the answer consistency condition: \\( \\mathcal{A}(V^K, Q) = \\mathcal{A}(V, Q) \\), where \\( \\mathcal{A}(\\cdot) \\) denotes the video QA function." + }, + { + "type": "text", + "bbox": [ + 0.173, + 0.559, + 0.825, + 0.588 + ], + "angle": 0, + "content": "- Compactness: \\( V^K \\) must be a minimal subset that preserves completeness, which means that no frame in \\( V^K \\) can be removed without hindering the accuracy and efficiency of video QA." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.53, + 0.826, + 0.588 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.597, + 0.447, + 0.611 + ], + "angle": 0, + "content": "2.2 Visual Semantic Logic Extraction" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.615, + 0.827, + 0.647 + ], + "angle": 0, + "content": "Starting from a question \\( Q \\) and uniformly sampled frames \\( \\overline{V}_N \\) from video \\( V \\), our goal is to extract key visual elements to answer \\( Q \\). We first classify the detected objects in \\( Q \\) and \\( \\overline{V}_N \\) into two categories:" + }, + { + "type": "text", + "bbox": [ + 0.173, + 0.65, + 0.826, + 0.679 + ], + "angle": 0, + "content": "- Key Objects: The main participants or references in the scene that the question explicitly or implicitly focuses on (e.g., \"person\", \"microphone\")." + }, + { + "type": "text", + "bbox": [ + 0.173, + 0.679, + 0.825, + 0.708 + ], + "angle": 0, + "content": "- Cue Objects: Secondary or contextual entities that help locate or disambiguate the Key Objects (e.g., \"book\", \"tiger painting\")." 
+ }, + { + "type": "list", + "bbox": [ + 0.173, + 0.65, + 0.826, + 0.708 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.712, + 0.825, + 0.755 + ], + "angle": 0, + "content": "To further leverage semantic and logical links among these objects, we define a set of relations \\(\\mathcal{R} \\subseteq \\mathcal{O} \\times \\Delta \\times \\mathcal{O}\\), where each relation \\(r = (o_i, \\delta, o_j) \\in \\mathcal{R}\\), with \\(o_i, o_j \\in \\mathcal{O}\\) denoting detected objects in the key and cue objects dataset, and \\(\\delta \\in \\Delta\\) representing one of the following types of relations:" + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.757, + 0.819, + 0.91 + ], + "angle": 0, + "content": "
<table><tr><td>Spatial Co-occurrence</td><td>Attribute Dependency</td></tr>
<tr><td>\(o_i\) and \(o_j\) appear in the same frame, indicating co-occurrence or proximity. Example: “A person is standing beside a vase.” ⇒ (person, spatial, vase)</td><td>\(o_i\) and \(o_j\) share visual properties, e.g., color or size. Example: “A person wears a black shirt.” ⇒ (person, attribute, black shirt)</td></tr>
<tr><td>Temporal Proximity</td><td>Causal Order</td></tr>
<tr><td>\(o_i\) and \(o_j\) occur in close frames, linking sequences or transitions. Example: “After a dog entered the room, a cat entered.” ⇒ (dog, temporal, cat)</td><td>\(o_i\) and \(o_j\) follow a cause-effect or prerequisite order. Example: “A little girl broke the vase.” ⇒ (little girl, causal, pieces)</td></tr></table>
" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.504, + 0.948 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "code_caption", + "bbox": [ + 0.174, + 0.095, + 0.501, + 0.11 + ], + "angle": 0, + "content": "Algorithm 1: Visual Semantic-Logical Search" + }, + { + "type": "algorithm", + "bbox": [ + 0.174, + 0.112, + 0.825, + 0.369 + ], + "angle": 0, + "content": "Function SemanticLogicalTemporalSearch(V,Q,K, \\(\\Delta_t,\\tau ,\\alpha ,\\gamma\\) \n\\(\\mathcal{O},\\mathcal{R}\\gets\\) ParseQuestion(Q) // Extract key/cue objects and relations \n\\(P\\leftarrow\\) Uniform, \\(B\\leftarrow |V|,S\\leftarrow \\emptyset ,N_{v}\\leftarrow |V|\\) // Initialize distribution and state \nwhile \\(B > 0\\) and \\(|\\mathcal{O}| > 0\\) do \n\\(k\\gets \\lfloor \\sqrt{B}\\rfloor ,G\\gets\\) Grid(Sample \\((P,k^2)\\)) // Adaptive grid sampling \n\\(\\Omega \\gets\\) DetectObjects(G) // Detect objects in sampled frames \nforeach \\(t\\in G\\) do \n\\(C_t\\gets\\) CalculateBaseScore( \\(\\Omega_t\\) ) // Base detection confidence \nforeach \\(r_{type}\\in \\mathcal{R}\\) do \n\\(\\delta \\gets\\) Processrelation(rtype, \\(\\Omega ,\\Delta_t,\\tau ,\\alpha ,\\gamma)\\) //relations require distinct processing \n\\(C_t\\gets C_t + \\delta\\) UpdateScores \\((S,t,C_t)\\) //Update global score registry \nDiffuseScores(S,w) // Temporal context propagation \n\\(P\\gets\\) NormalizeDistribution(S), \\(B\\gets B - k^{2}\\) // Update sampling distribution \nforeach \\(g\\in \\mathrm{TopK}(S,K)\\) do \nif \\(\\Omega [g]\\cap \\mathcal{O}\\neq \\emptyset\\) then // Remove identified key objects \n\\(\\begin{array}{rlrl} & {\\mathcal{O}} & {\\leftarrow \\mathcal{O}\\backslash \\Omega [g]} & {} \\end{array}\\)" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.4, + 0.827, + 0.526 + ], + "angle": 0, + "content": "The choice of these four relations draws on core concepts in linguistics and logic Cohen (1968); Sowa (2000); Talmy (2000), which identify spatial, temporal, attributive, and causal aspects as fundamental for structuring, perceiving, and communicating information about events and states. For more details on this selection, please see appendix A for reference. As shown in Figure 1, we construct semantic-logical relations that support a broad range of question-answering tasks. Specifically, questions involving temporal queries (when does \\( X \\) happen?), causal reasoning (why did \\( Y \\) occur?\"), attribute dependence (What is the person wearing sunglasses doing?), or spatial constraints (Who is standing next to the red car?) can be answered more reliably by incorporating these structured relations and contextual cues." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.536, + 0.523, + 0.551 + ], + "angle": 0, + "content": "2.3 Iterative Semantic-Logical Temporal Search" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.555, + 0.825, + 0.626 + ], + "angle": 0, + "content": "Based on the extracted key and cue objects and their logic relations, our algorithm iteratively searches for keyframes through semantic and logical reasoning, including four main stages: Frame Sampling (Sec. 2.3.1), Object Detection and Scoring (Sec. 2.3.2), Visual Semantic Logic Detection (Sec. 2.3.3), and Distribution Update (Sec. 2.3.4). The pseudocode is shown in Algorithm 1, and Algorithm 2 provides a more detailed version." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.634, + 0.345, + 0.649 + ], + "angle": 0, + "content": "2.3.1 Frame Sampling" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.652, + 0.825, + 0.706 + ], + "angle": 0, + "content": "To accelerate the search process, we avoid exhaustively scanning all \\( N_v \\) video frames and instead employ a distributed sampling strategy. Let \\( N_v \\) denote the total number of frames in the video, and \\( P \\) be a uniformly initialized sampling distribution over all frames. The sampling process is then defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.402, + 0.707, + 0.825, + 0.723 + ], + "angle": 0, + "content": "\\[\nI _ {s} = \\operatorname {S a m p l e} \\left(P \\odot N _ {v}, N _ {s}\\right), \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.727, + 0.825, + 0.77 + ], + "angle": 0, + "content": "where \\(\\mathrm{Sample}(\\cdot ,N_s)\\) selects a subset of \\(N_{s}\\) frames according to the distribution \\(P\\odot N_v\\) . To further leverage the detecting ability ofYOLO, we stack the sampled frames into a \\(k\\times k\\) grid, which imposes a constraint on the sample size \\(N_{s}\\) . Specifically, we require:" + }, + { + "type": "equation", + "bbox": [ + 0.371, + 0.776, + 0.825, + 0.794 + ], + "angle": 0, + "content": "\\[\nN _ {s} \\in \\{k ^ {2} \\mid k \\in \\mathbb {Z} \\} \\quad \\text {a n d} \\quad N _ {s} < N _ {v}. \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.801, + 0.826, + 0.844 + ], + "angle": 0, + "content": "In practice, this ensures that the number of sampled frames can be reshaped into a compact 2D grid for efficient processing. Although \\( P \\) is initially uniform, it can be adapted over multiple rounds of sampling to focus on frames of higher interest in the video." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.852, + 0.433, + 0.868 + ], + "angle": 0, + "content": "2.3.2 Object Detection and Scoring" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.87, + 0.825, + 0.913 + ], + "angle": 0, + "content": "In this stage, we construct the detection search space by taking the union of both key objects and cue objects. For each iteration, we detect objects on the \\(N_{s}\\) sampled frames using a lightweight model like YOLO-WORLD Cheng et al. (2024a) for high efficiency and score the frames based on detection" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.173, + 0.093, + 0.825, + 0.231 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.238, + 0.825, + 0.294 + ], + "angle": 0, + "content": "Figure 3: Sample weight evolution under VSLS optimization for keyframe selection. Top: 16 iterations show progressive convergence toward Ground Truth (red). Bottom: 15 iterations demonstrate similar alignment. Yellow highlights indicate precise matches between algorithm outputs (green) and manual annotations." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.303, + 0.825, + 0.334 + ], + "angle": 0, + "content": "confidence. Specifically, let \\(\\Omega_t\\) be the set of detected objects in the frame at time \\(t\\), \\(c_o\\) the confidence of each detected object, and \\(w_o\\) the corresponding weight. 
We define the frame score as:", + "bbox": [ + 228, + 309, + 823, + 393 + ], + "page_idx": 29 + }, + { + "type": "equation", + "bbox": [ + 0.429, + 0.339, + 0.826, + 0.363 + ], + "angle": 0, + "content": "\[\nC _ {t} = \max _ {o \in \Omega_ {t}} \left(c _ {o} \cdot w _ {o}\right). \tag {3}\n\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.371, + 0.827, + 0.402 + ], + "angle": 0, + "content": "If the confidence score of any key object exceeds a predefined threshold, the corresponding frame is added to a list, thereby maintaining a record of frames where crucial targets have been identified for subsequent processing." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.409, + 0.453, + 0.424 + ], + "angle": 0, + "content": "2.3.3 Visual Semantic Logic Detection" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.427, + 0.825, + 0.503 + ], + "angle": 0, + "content": "Beyond individual object detection and frame-level scoring, we refine each frame's confidence score by modeling higher-order object relations. Let \(\mathcal{R}\) be the set of relations, where each \(r\in \mathcal{R}\) involves a pair \((o_1,o_2)\) and is labeled by a type \(r_{\mathrm{type}}\). Denote \(C_t\) as the confidence score at time \(t\), with a global scaling factor \(\alpha\) and a relation-specific weight \(\gamma_{r_{\mathrm{type}}}\) controlling each logic type's impact. The refined confidence \(C_t^{(r)}\) after applying relation \(r\) is defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.422, + 0.511, + 0.826, + 0.533 + ], + "angle": 0, + "content": "\[\nC _ {t} ^ {(r)} = C _ {t} + \alpha \cdot \gamma_ {r _ {\text {t y p e}}}. \tag {4}\n\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.539, + 0.825, + 0.582 + ], + "angle": 0, + "content": "Spatial Relation. A spatial relation enforces that two objects \( o_1 \) and \( o_2 \) must co-occur in the same frame. Let \( \Omega_t \) be the set of detected objects in frame \( t \). If both \( o_1 \in \Omega_t \) and \( o_2 \in \Omega_t \), then the corresponding frame confidence is updated as:" + }, + { + "type": "equation", + "bbox": [ + 0.422, + 0.59, + 0.826, + 0.607 + ], + "angle": 0, + "content": "\[\nC _ {t} \leftarrow C _ {t} + \alpha \cdot \gamma_ {\text {s p a t i a l}}. \tag {5}\n\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.614, + 0.825, + 0.672 + ], + "angle": 0, + "content": "Attribute Relation. An attribute relation is satisfied when \( o_1 \) and \( o_2 \) share sufficient bounding-box overlap in the same frame. Let overlap be the ratio of their intersection area to the minimum of their individual bounding-box areas. If the overlap ratio exceeds a predefined threshold \( \tau \) (\( \tau = 0.5 \) in our experimental setting), we increase the frame confidence:" + }, + { + "type": "equation", + "bbox": [ + 0.418, + 0.68, + 0.826, + 0.695 + ], + "angle": 0, + "content": "\[\nC _ {t} \leftarrow C _ {t} + \alpha \cdot \gamma_ {\text {a t t r i b u t e}}. \tag {6}\n\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.704, + 0.827, + 0.76 + ], + "angle": 0, + "content": "Time Relation. A time relation checks whether two objects appear in temporally close frames. Suppose \( t_i \) and \( t_j \) (\( t_i \leq t_j \)) are sampled such that \( |t_j - t_i| < \Delta_t \), where \( \Delta_t \) is a threshold (e.g.,
5 frames in our experimental setting). If \( o_1 \) occurs in frame \( t_i \) and \( o_2 \) in frame \( t_j \), then both frames' confidence scores are updated:" + }, + { + "type": "equation", + "bbox": [ + 0.338, + 0.768, + 0.826, + 0.786 + ], + "angle": 0, + "content": "\[\nC _ {t _ {i}} \leftarrow C _ {t _ {i}} + \alpha \cdot \gamma_ {\text {t i m e}}, \quad C _ {t _ {j}} \leftarrow C _ {t _ {j}} + \alpha \cdot \gamma_ {\text {t i m e}}. \tag {7}\n\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.793, + 0.825, + 0.837 + ], + "angle": 0, + "content": "Causal Relation. A causal relation models an ordering constraint, enforcing that \( o_1 \) must appear at an earlier time than \( o_2 \). Specifically, if \( o_1 \in \Omega_{t_i} \) and \( o_2 \in \Omega_{t_j} \) with \( t_i < t_j \), we update the confidence of frames \( t_i \) and \( t_j \) by:" + }, + { + "type": "equation", + "bbox": [ + 0.329, + 0.844, + 0.826, + 0.862 + ], + "angle": 0, + "content": "\[\nC _ {t _ {i}} \leftarrow C _ {t _ {i}} + \alpha \cdot \gamma_ {\text {c a u s a l}}, \quad C _ {t _ {j}} \leftarrow C _ {t _ {j}} + \alpha \cdot \gamma_ {\text {c a u s a l}}. \tag {8}\n\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.869, + 0.826, + 0.924 + ], + "angle": 0, + "content": "Through this scoring mechanism, frames with detected relations will have greater confidence and are more likely to be retrieved as keyframes for the given query and video. We also conducted hyperparameter search experiments and found that \(\alpha = 0.3\) (chosen from 0.3, 0.5, 0.7, 1.0) and \(\gamma_{r_{\mathrm{type}}} = 0.5\) achieve the best results across different datasets." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.937, + 0.504, + 0.948 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.172, + 0.092, + 0.367, + 0.107 + ], + "angle": 0, + "content": "2.3.4 Distribution Update" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.11, + 0.825, + 0.18 + ], + "angle": 0, + "content": "After each iteration of frame sampling, we merge newly obtained frame confidences into the global score distribution \(\{S_f\}\) spanning all frames \(f = 1,2,\dots ,N_v\). When a frame \(f\) is selected for detection, its score is assigned to the confidence value \(C_f\), and the visitation counter \(N_{v,f}\) is reset to 0. To incorporate temporal context, we diffuse this updated score to neighboring frames within a window of size \(w\). Denoting each nearby index by \(f\pm \delta\) (for \(\delta \in [-w,w]\)), we apply:" + }, + { + "type": "equation", + "bbox": [ + 0.386, + 0.184, + 0.826, + 0.218 + ], + "angle": 0, + "content": "\[\nS _ {f \pm \delta} \leftarrow \max \left(S _ {f \pm \delta}, \frac {S _ {f}}{1 + | \delta |}\right). \tag {9}\n\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.222, + 0.827, + 0.292 + ], + "angle": 0, + "content": "In this way, high-confidence frames raise the scores of close-by frames, reflecting temporal continuity. Following these local updates, the sampling distribution \( P \) is refined using spline interpolation, and then normalized. This iteration proceeds until either the search budget \( B \) is exhausted or all key objects have been successfully identified. The visualization of distribution in different iterations can be seen in Figure 3. Finally, the method outputs the top \( K \) frames according to their terminal scores."
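Read as code, the bonus terms in Eqs. (5)-(8) are simple predicates over the detections gathered so far. The standalone sketch below uses the reported defaults (alpha = 0.3, gamma = 0.5, tau = 0.5, delta_t = 5 frames); the (x1, y1, x2, y2) box format, the dets structure, and the one-sided temporal/causal checks over sampled frames are illustrative assumptions, not the authors' code. It matches the relation_bonus hook assumed by the search sketch after Algorithm 1 above.

```python
def overlap_ratio(box_a, box_b):
    """Intersection area over the smaller box area (attribute check)."""
    ax1, ay1, ax2, ay2 = box_a
    bx1, by1, bx2, by2 = box_b
    iw = max(0.0, min(ax2, bx2) - max(ax1, bx1))
    ih = max(0.0, min(ay2, by2) - max(ay1, by1))
    smaller = min((ax2 - ax1) * (ay2 - ay1), (bx2 - bx1) * (by2 - by1))
    return iw * ih / max(smaller, 1e-9)

def relation_bonus(rel, t, dets, alpha=0.3, gamma=0.5, tau=0.5, delta_t=5):
    """Score bonus for frame t under one (o1, rtype, o2) triplet.
    `dets` maps a frame index to {object_name: bounding_box}."""
    o1, rtype, o2 = rel
    bonus = alpha * gamma
    here = dets.get(t, {})
    if rtype == "spatial":      # Eq. (5): co-occurrence in the same frame
        return bonus if o1 in here and o2 in here else 0.0
    if rtype == "attribute":    # Eq. (6): bounding-box overlap above tau
        if o1 in here and o2 in here and overlap_ratio(here[o1], here[o2]) > tau:
            return bonus
        return 0.0
    if rtype == "temporal":     # Eq. (7): o2 within delta_t sampled frames
        near = any(o2 in dets.get(t + d, {}) for d in range(-delta_t, delta_t + 1))
        return bonus if o1 in here and near else 0.0
    if rtype == "causal":       # Eq. (8): o1 seen strictly before o2
        earlier = any(o1 in frame for s, frame in dets.items() if s < t)
        return bonus if o2 in here and earlier else 0.0
    return 0.0
```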
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.298, + 0.307, + 0.314 + ], + "angle": 0, + "content": "3 Experiment" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.318, + 0.359, + 0.332 + ], + "angle": 0, + "content": "3.1 Benchmark Datasets" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.337, + 0.827, + 0.449 + ], + "angle": 0, + "content": "The proposed VSLS is systematically evaluated across four benchmark datasets: a) LONGVIDEOBENCH Ye et al. (2025a) for assessing long-context video-language comprehension capabilities; b) Video-MME Fu et al. (2024) as the first comprehensive benchmark for multimodal video analytics; c) HAYSTACK-LVBENCH, extended from LONGVIDEOBENCH with human-annotated frame index answers; and d) HAYSTACK-EGO4D, derived from EGO4D with similar annotations. While LONGVIDEOBENCH and Video-MME measure performance enhancement in QA accuracy, HAYSTACK-EGO4D and HAYSTACK-LVBENCH quantitatively evaluate keyframe selection accuracy through recall and precision metrics. Further details of datasets are provided in Appendix D." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.455, + 0.348, + 0.468 + ], + "angle": 0, + "content": "3.2 Evaluation Metrics" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.474, + 0.484, + 0.489 + ], + "angle": 0, + "content": "3.2.1 Evaluation Metrics for Search Utility" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.491, + 0.825, + 0.561 + ], + "angle": 0, + "content": "Our assessment framework emphasizes both effectiveness and efficiency. For search effectiveness, we use three metrics to compare model-predicted keyframes with human annotations, considering both individual frames and full sets—addressing the possibility of multiple valid keyframe sets per query. For frame-level comparison, we evaluate the alignment between a predicted frame \\( f_{\\mathrm{pt}} \\) and a human-annotated frame \\( f_{\\mathrm{gt}} \\) from two perspectives:" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.564, + 0.825, + 0.592 + ], + "angle": 0, + "content": "Temporal coverage evaluates the coverage of ground truth frames by predicted frames in the temporal perspective, which can be described as:" + }, + { + "type": "equation", + "bbox": [ + 0.345, + 0.595, + 0.826, + 0.649 + ], + "angle": 0, + "content": "\\[\nT _ {\\text {c o v e r}} \\left(T _ {\\mathrm {p t}}, T _ {\\mathrm {g t}}\\right) = \\frac {\\sum_ {i = 1} ^ {| N _ {\\mathrm {g t}} |} \\mathbb {I} \\left[ \\min _ {j} \\left| t _ {\\mathrm {g t}} ^ {i} - t _ {\\mathrm {p t}} ^ {j} \\right| \\leq \\delta \\right]}{| N _ {\\mathrm {g t}} |}, \\tag {10}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.659, + 0.825, + 0.718 + ], + "angle": 0, + "content": "where \\( T_{\\mathrm{pt}} \\) and \\( T_{\\mathrm{gt}} \\) denote the sets of predicted and ground truth timestamps, respectively. Here, \\( |N_{\\mathrm{gt}}| \\) is the number of ground truth frames, \\( t_{\\mathrm{gt}}^i \\) and \\( t_{\\mathrm{pt}}^j \\) are the \\( i \\)-th ground truth and \\( j \\)-th predicted timestamps, respectively. \\( \\delta \\) is the temporal similarity threshold defining the maximum allowed time deviation, and \\( \\mathbb{I}[\\cdot] \\) is the indicator function, returning 1 if the condition holds and 0 otherwise." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.72, + 0.825, + 0.809 + ], + "angle": 0, + "content": "Visual Similarity is measured by the Structural Similarity Index (SSIM) Brunet et al. 
(2012), capturing structural detail, luminance, and contrast between \( f_{\mathrm{pt}} \) and \( f_{\mathrm{gt}} \). For set-to-set comparison, the key challenge is defining inter-set similarity. We adopt Precision \( P \) and Recall \( R \) as complementary metrics: Precision checks whether each predicted frame matches any reference frame, while Recall ensures that all reference frames are represented. Given the ground truth set \( F_{\mathrm{gt}} = \{f_{\mathrm{gt}}^{j}\}_{j=1}^{n} \) and the predicted set \( F_{\mathrm{pt}} = \{f_{\mathrm{pt}}^{i}\}_{i=1}^{m} \), we define the multimodal retrieval quality metrics as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.349, + 0.819, + 0.826, + 0.904 + ], + "angle": 0, + "content": "\[\n\left\{ \begin{array}{l} P \left(F _ {\mathrm {p t}}, F _ {\mathrm {g t}}\right) = \frac {1}{\left| F _ {\mathrm {p t}} \right|} \sum_ {f _ {\mathrm {p t}} ^ {i} \in F _ {\mathrm {p t}}} \max _ {f _ {\mathrm {g t}} ^ {j} \in F _ {\mathrm {g t}}} \phi \left(f _ {\mathrm {p t}} ^ {i}, f _ {\mathrm {g t}} ^ {j}\right), \\ R \left(F _ {\mathrm {p t}}, F _ {\mathrm {g t}}\right) = \frac {1}{\left| F _ {\mathrm {g t}} \right|} \sum_ {f _ {\mathrm {g t}} ^ {j} \in F _ {\mathrm {g t}}} \max _ {f _ {\mathrm {p t}} ^ {i} \in F _ {\mathrm {p t}}} \phi \left(f _ {\mathrm {g t}} ^ {j}, f _ {\mathrm {p t}} ^ {i}\right), \end{array} \right. \tag {11a}\n\]" + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.907, + 0.677, + 0.922 + ], + "angle": 0, + "content": "where \(\phi (\cdot ,\cdot)\) represents an extensible multimodal similarity metric function." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.937, + 0.505, + 0.948 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.174, + 0.09, + 0.825, + 0.203 + ], + "angle": 0, + "content": "
<table><tr><td rowspan="2">Method</td><td rowspan="2">Training Required</td><td colspan="4">Searching Efficiency</td><td colspan="2">Overall Task Efficiency</td></tr>
<tr><td>Matching</td><td>Iteration</td><td>TFLOPs ↓</td><td>Latency (sec) ↓</td><td>Latency (sec) ↓</td><td>Acc ↑</td></tr>
<tr><td colspan="8">Static Frame Sampling</td></tr>
<tr><td>UNIFORM-8 Ye et al. (2025a)</td><td>Training-Based</td><td>N/A</td><td>N/A</td><td>N/A</td><td>0.2</td><td>3.8</td><td>53.7</td></tr>
<tr><td colspan="8">Dense Retrieval</td></tr>
<tr><td>VIDEOAGENT Fan et al. (2024)</td><td>Training-Based</td><td>CLIP-1B Radford et al. (2021)</td><td>840</td><td>536.5</td><td>30.2</td><td>34.9</td><td>49.2</td></tr>
<tr><td>T*-RETRIEVAL Ye et al. (2025b)</td><td>Training-Based</td><td>YOLO-WORLD-110M</td><td>840</td><td>216.1</td><td>28.6</td><td>32.2</td><td>57.3</td></tr>
<tr><td colspan="8">Temporal Search</td></tr>
<tr><td>T*-ATTENTION Ye et al. (2025b)</td><td>Training-Based</td><td>N/A</td><td>N/A</td><td>88.9</td><td>13.7</td><td>17.3</td><td>59.3</td></tr>
<tr><td>T*-DETECTOR Ye et al. (2025b)</td><td>Training-Free</td><td>YOLO-WORLD-110M</td><td>43</td><td>31.7</td><td>7.3</td><td>11.1</td><td>59.8</td></tr>
<tr><td>VSLS (OURS)-DETECTOR</td><td>Training-Free</td><td>YOLO-WORLD-110M</td><td>49</td><td>33.3</td><td>7.8</td><td>11.6</td><td>61.5</td></tr></table>
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.21, + 0.825, + 0.239 + ], + "angle": 0, + "content": "Table 1: Evaluation of performance metrics across the LV-HAYSTACK benchmark, presenting both search efficiency and end-to-end processing overhead (combining search and inference stages)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.248, + 0.504, + 0.263 + ], + "angle": 0, + "content": "3.2.2 Evaluation Metrics for Search efficiency" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.266, + 0.827, + 0.336 + ], + "angle": 0, + "content": "Existing studies Fan et al. (2024); Park et al. (2024); Wang et al. (2024a,d); Wu and Xie (2023) have mainly concentrated on optimizing task-specific performance metrics while neglecting computational efficiency in temporal search operations. To systematically analyze this dimension, our evaluation framework incorporates two criteria: 1) FLOPs representing arithmetic operation complexity, and 2) Latency recording real-world execution duration." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.348, + 0.513, + 0.363 + ], + "angle": 0, + "content": "3.3 Evaluation of Search Framework efficiency" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.368, + 0.828, + 0.493 + ], + "angle": 0, + "content": "Current approaches for keyframe selection can be broadly categorized into three paradigms: statistic-based frame sampling, dense feature retrieval-based selection, and temporal search-based methods. As shown in Table 1, while uniform sampling achieves the fastest processing speed, its ignorance of frame semantics severely limits downstream task effectiveness. Although dense feature retrieval methods attain moderate accuracy improvements (57.3%), their exhaustive frame processing demands \\(4.2 \\times\\) more TFLOPs and introduces \\(4.5 \\times\\) higher latency than our temporal search approach. Crucially, our method introduces four visual semantic logic detectors during temporal search while maintaining comparable execution time to T* methods. This strategic design elevates downstream task accuracy to \\(61.5\\%\\), achieving the best performance-efficiency trade-off." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.504, + 0.515, + 0.518 + ], + "angle": 0, + "content": "3.4 Visual Semantic Logic Search Performance" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.524, + 0.827, + 0.635 + ], + "angle": 0, + "content": "As demonstrated in Table 2, we evaluate VSLS on LONGVIDEOBENCH from two critical perspectives: visual similarity (measured by precision and recall) and temporal coverage. Our method achieves state-of-the-art performance across all metrics. Specifically, under the 32-frame setting, VSLS attains a precision of \\(74.5\\%\\) and recall of \\(92.5\\%\\), outperforming all baselines in visual similarity. More notably, the temporal coverage of VSLS reaches \\(41.4\\%\\), surpassing the second-best method (\\(T* at 36.5\\%\\)) by \\(13.4\\%\\)—the largest margin among all comparisons. This significant improvement highlights the effectiveness of our visual semantic logic detection modules in identifying query-relevant keyframes with both semantic alignment and temporal completeness." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.641, + 0.5, + 0.739 + ], + "angle": 0, + "content": "These results empirically support our core hypothesis: leveraging semantic and logical cues from text queries enables precise detection of relevant video frames. 
Improvements in visual similarity and temporal coverage confirm that VSLS effectively captures keyframes while preserving temporal coherence through visual-logical alignment." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.75, + 0.471, + 0.765 + ], + "angle": 0, + "content": "3.5 Downstream Video QA Performance" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.77, + 0.499, + 0.907 + ], + "angle": 0, + "content": "To demonstrate the advantages of VSLS, we evaluate downstream video QA performance on LONGVIDEOBENCH and VIDEO-MME. As shown in Table 3, videos are grouped by length into Short, Medium, and Long (15-3600s on LONGVIDEOBENCH, up to 60 mins on VIDEO-MME). VSLS consistently achieves the highest accuracy in the long-video category across different frame counts and QA models. Compared to the baseline T*, incorporating our visual semantic logic relations (Figure 1) yields substantial gains." + }, + { + "type": "table_caption", + "bbox": [ + 0.508, + 0.644, + 0.825, + 0.714 + ], + "angle": 0, + "content": "Table 2: Search utility results on LONGVIDEOBENCH. Best scores in the 8-frame setting are underlined, and in the 32-frame setting are bold. Gray indicates results from the original paper." + }, + { + "type": "table", + "bbox": [ + 0.51, + 0.72, + 0.827, + 0.893 + ], + "angle": 0, + "content": "
<table><tr><td rowspan="2">Method</td><td rowspan="2">Frame</td><td colspan="3">LONGVIDEOBENCH</td></tr>
<tr><td>Precision ↑</td><td>Recall ↑</td><td>Time ↑</td></tr>
<tr><td colspan="5">Static Frame Sampling Method</td></tr>
<tr><td>UNIFORM Ye et al. (2025a)</td><td>8</td><td>56.0</td><td>72.0</td><td>6.3</td></tr>
<tr><td>UNIFORM</td><td>8</td><td>60.7</td><td>80.4</td><td>4.7</td></tr>
<tr><td>UNIFORM</td><td>32</td><td>58.7</td><td>81.6</td><td>24.9</td></tr>
<tr><td>UNIFORM</td><td>32</td><td>60.2</td><td>85.0</td><td>8.1</td></tr>
<tr><td colspan="5">Dense Retrieval Method</td></tr>
<tr><td>VIDEOAGENT Fan et al. (2024)</td><td>10.1</td><td>58.8</td><td>73.2</td><td>8.5</td></tr>
<tr><td>RETRIEVAL-BASED Ye et al. (2025b)</td><td>8</td><td>63.1</td><td>65.5</td><td>6.3</td></tr>
<tr><td>RETRIEVAL-BASED</td><td>32</td><td>59.9</td><td>80.8</td><td>21.8</td></tr>
<tr><td colspan="5">Temporal Searching Method</td></tr>
<tr><td>T* Ye et al. (2025b)</td><td>8</td><td>58.4</td><td>72.7</td><td>7.1</td></tr>
<tr><td>T*</td><td>8</td><td>75.3</td><td>88.2</td><td>26.2</td></tr>
<tr><td>VSLS (ours)</td><td>8</td><td>75.6</td><td>88.6</td><td>26.3</td></tr>
<tr><td>T*</td><td>32</td><td>58.3</td><td>83.2</td><td>28.2</td></tr>
<tr><td>T*</td><td>32</td><td>74.0</td><td>90.3</td><td>36.5</td></tr>
<tr><td>VSLS (ours)</td><td>32</td><td>74.5</td><td>92.5</td><td>41.4</td></tr></table>
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.172, + 0.908, + 0.814, + 0.923 + ], + "angle": 0, + "content": "These results confirm that modeling visual-logical relations is key to effective QA on long videos." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.947 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.174, + 0.09, + 0.825, + 0.269 + ], + "angle": 0, + "content": "
<table><tr><td colspan="5">LONGVIDEOBENCH</td><td colspan="4">VIDEO-MME</td></tr>
<tr><td rowspan="2">Model and Size</td><td rowspan="2">Frame</td><td colspan="3">Video Length</td><td rowspan="2">Model and Size</td><td rowspan="2">Frame</td><td colspan="2">Video Length</td></tr>
<tr><td>Long 900-3600s</td><td>Medium 180-600s</td><td>Short 15-60s</td><td>Long 30-60min</td><td>Medium 4-15min</td></tr>
<tr><td>GPT-4o Hurst et al. (2024)</td><td>8</td><td>47.1</td><td>49.4</td><td>67.3</td><td>GPT-4o</td><td>8</td><td>55.2</td><td>60.2</td></tr>
<tr><td>GPT-4o + T*</td><td>8</td><td>49.1</td><td>56.2</td><td>68.0</td><td>GPT-4o + T*</td><td>8</td><td>55.2</td><td>61.2</td></tr>
<tr><td>GPT-4o + VSLS (ours)</td><td>8</td><td>51.2</td><td>58.9</td><td>74</td><td>GPT-4o + VSLS (ours)</td><td>8</td><td>56.9</td><td>60.7</td></tr>
<tr><td>INTERNVL 2.5-78B Chen et al. (2024d)</td><td>8</td><td>55.7</td><td>57.3</td><td>74.0</td><td>INTERNVL 2.5-78B</td><td>8</td><td>52.6</td><td>55.5</td></tr>
<tr><td>INTERNVL 2.5-78B + VSLS (ours)</td><td>8</td><td>58.0</td><td>61.5</td><td>74.0</td><td>INTERNVL 2.5-78B + VSLS (ours)</td><td>8</td><td>57.7</td><td>57.5</td></tr>
<tr><td>GPT-4o</td><td>32</td><td>53.8</td><td>56.5</td><td>74.0</td><td>GPT-4o</td><td>32</td><td>55.2</td><td>61.0</td></tr>
<tr><td>GPT-4o + T*</td><td>32</td><td>55.3</td><td>58.8</td><td>72.0</td><td>GPT-4o + T*</td><td>32</td><td>55.2</td><td>61.6</td></tr>
<tr><td>GPT-4o + VSLS (ours)</td><td>32</td><td>54.2</td><td>60.0</td><td>76.0</td><td>GPT-4o + VSLS (ours)</td><td>32</td><td>55.2</td><td>61.9</td></tr>
<tr><td>LLAVA-ONEVISION-QWEN2-78B-OV</td><td>32</td><td>59.3</td><td>63.9</td><td>77.4</td><td>LLaVA-OneVision-78B</td><td>32</td><td>60.0</td><td>62.2</td></tr>
<tr><td>PLLAVA-34B</td><td>32</td><td>49.1</td><td>50.8</td><td>66.8</td><td>VIDEOLLAMA 2</td><td>32</td><td>57.6</td><td>59.9</td></tr>
<tr><td>LLAVA-VIDEO-78B-QWEN2</td><td>128</td><td>59.3</td><td>63.9</td><td>77.4</td><td>ORYX-1.5</td><td>128</td><td>59.3</td><td>65.3</td></tr>
<tr><td>MPLUG-OWL3-7B</td><td>128</td><td>53.9</td><td>58.8</td><td>73.7</td><td>ARIA-8x3.5B</td><td>256</td><td>58.8</td><td>67.0</td></tr>
<tr><td>GPT-4o (0513)</td><td>256</td><td>61.6</td><td>66.7</td><td>76.8</td><td>GEMINI-1.5-Pro (0615)</td><td>1/0.5 fps</td><td>67.4</td><td>74.3</td></tr></table>
" + }, + { + "type": "table_caption", + "bbox": [ + 0.17, + 0.276, + 0.825, + 0.334 + ], + "angle": 0, + "content": "Table 3: Downstream task evaluation results on two benchmarks. All accuracy scores (\\%) in black are from our replication. We also cite the reported accuracy of SOTA models in gray (noting that their settings may differ and results may not be reproducible), along with their number of frames used for QA inference, for full transparency." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.344, + 0.279, + 0.361 + ], + "angle": 0, + "content": "4 Analysis" + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.37, + 0.555, + 0.385 + ], + "angle": 0, + "content": "4.1 Coverage Analysis of Semantic-Logical Relations" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.396, + 0.827, + 0.493 + ], + "angle": 0, + "content": "To ascertain the practical applicability and coverage of our defined semantic-logical relations (spatial, temporal, attribute, and causal), we conducted an analysis of their detection across all queries in the LongVideoBench and VideoMME datasets. Our findings reveal a crucial insight: for every question posed within these extensive VQA benchmarks, our query analysis module successfully identified and mapped the query to at least one of the four defined logical relation types. This empirical result supports the completeness of our proposed relation set for interpreting the semantic and logical intent inherent in these VQA tasks." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.506, + 0.336, + 0.521 + ], + "angle": 0, + "content": "4.2 Time Complexity" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.526, + 0.5, + 0.678 + ], + "angle": 0, + "content": "The proposed framework consists of two stages. First, VLMs such as LLAVA-7B and GPT-40 extract a semantic set \\(S\\) from a video \\(V\\) with \\(n\\) frames. \\(S\\) includes target objects, cue objects, and their relations, with their size constrained by prompt design. In the second stage, keyframe identification is performed via a heuristic search: \\(k\\) candidates are iteratively selected using a scoring function \\(h(\\cdot ,S)\\). The score distribution scores \\([n]\\) is dynamically refined using outputs from the YOLO-WORLD detector." + }, + { + "type": "image", + "bbox": [ + 0.513, + 0.531, + 0.822, + 0.676 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.508, + 0.686, + 0.827, + 0.771 + ], + "angle": 0, + "content": "Figure 4: Average occurrences of detected semantic-logical relation types per question on the VideoMME and LongVideoBench datasets. Spatial relations are the most frequently identified, while all queries in both datasets triggered at least one of the four relation types." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.684, + 0.5, + 0.781 + ], + "angle": 0, + "content": "Our analysis focuses on YOLO-WORLD detections, the main computational bottleneck due to their reliance on deep neural networks. Reducing the number of detections improves efficiency without sacrificing accuracy. At each iteration, the detector processes \\( k \\) selected frames to match objects and relations in \\( S \\), yielding \\( k \\) detections. The" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.78, + 0.827, + 0.852 + ], + "angle": 0, + "content": "search stops when all targets are found or the iteration budget \\(\\min(1000, 0.1 \\times V_t)\\) (with \\(V_t\\) as the video duration in seconds) is exhausted. 
In the worst case (e.g., videos with \\(>10,000\\) frames and no matches), the cap is 1,000 iterations. Ideally, the evaluation function \\(h(\\cdot, S)\\) assigns high confidence to target frames, making the algorithm resemble top-\\(k\\) selection over \\(n\\) candidates in \\(\\mathcal{O}(|S| \\log n)\\) iterations Ye et al. (2025b), resulting in an average of \\(\\mathcal{O}(|S| k \\log n)\\) YOLO-WORLD inferences." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.856, + 0.829, + 0.913 + ], + "angle": 0, + "content": "Experimental results also demonstrate that integrating relational information into the search algorithm incurs negligible computational overhead compared to the baseline T* approach. On the LV-HAYSTACK benchmark, the average iteration count increases from 42.94 (T*) to 48.82 iterations, representing a modest \\(13.69\\%\\) rise in the time cost." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.172, + 0.092, + 0.444, + 0.106 + ], + "angle": 0, + "content": "4.3 Ablation Study of Four Relations" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.119, + 0.461, + 0.312 + ], + "angle": 0, + "content": "Figure 4 illustrates the distribution of four logic relation types across LONGVIDEOBENCH and VIDEO-MME datasets, where spatial relations predominate, followed by attribute relations. In Table 4, we extract samples containing different relation types from LONGVIDEOBENCH to compare the object detection-based T* method with our VSLS approach. Experimental results demonstrate that VSLS achieves significant improvements across both image similarity metrics (SSIM Precision and SSIM Recall). Additionally, temporal coverage shows marked enhance" + }, + { + "type": "table", + "bbox": [ + 0.478, + 0.099, + 0.819, + 0.244 + ], + "angle": 0, + "content": "
<table><tr><td rowspan="2">Logic Type</td><td rowspan="2">Method</td><td colspan="3">LONGVIDEOBENCH</td></tr>
<tr><td>Precision ↑</td><td>Recall ↑</td><td>TC ↑</td></tr>
<tr><td rowspan="2">Spatial</td><td>T*</td><td>72.9</td><td>88.7</td><td>37.5</td></tr>
<tr><td>VSLS (ours)</td><td>73.6</td><td>91.4</td><td>45.5</td></tr>
<tr><td rowspan="2">Attribute</td><td>T*</td><td>71.8</td><td>87.6</td><td>38.5</td></tr>
<tr><td>VSLS (ours)</td><td>72.7</td><td>90.9</td><td>42.1</td></tr>
<tr><td rowspan="2">Time</td><td>T*</td><td>76.7</td><td>89.2</td><td>37.3</td></tr>
<tr><td>VSLS (ours)</td><td>77.5</td><td>92.5</td><td>36.1</td></tr>
<tr><td rowspan="2">Causal</td><td>T*</td><td>74.7</td><td>92.4</td><td>38.6</td></tr>
<tr><td>VSLS (ours)</td><td>74.7</td><td>93.8</td><td>39.6</td></tr></table>
" + }, + { + "type": "table_caption", + "bbox": [ + 0.468, + 0.25, + 0.828, + 0.307 + ], + "angle": 0, + "content": "Table 4: Comparison of our method (VSLS) with the baseline across four logic relation types on LONGVIDEOBENCH. Precision: SSIM Precision; Recall: SSIM Recall; TC: Temporal Coverage." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.312, + 0.825, + 0.396 + ], + "angle": 0, + "content": "ment for attribute, spatial, and causal relations, with spatial relations exhibiting the most substantial improvement (21.3% increase over T*). For the time relation category, we observe a slight decrease in temporal coverage, which may be attributed to the relative scarcity of time relation samples in the dataset, limiting the opportunity to demonstrate the advantages of VSLS. Nevertheless, Figure 1 provides visual evidence of how effectively leveraging time relations can facilitate downstream question-answering tasks." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.412, + 0.323, + 0.428 + ], + "angle": 0, + "content": "5 Related Work" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.442, + 0.828, + 0.569 + ], + "angle": 0, + "content": "Challenges in Long Video Understanding: Long video understanding is inherently more challenging than short-video or image-based tasks due to its rich temporal dynamics and massive redundancy Qian et al. (2024); Zeng et al. (2024); Yu et al. (2019). The large number of frames increases both memory and computational requirements, making straightforward dense sampling infeasible. Moreover, crucial events may span distant timestamps, demanding high-capacity models to capture long-range dependencies Ranasinghe et al. (2025); Shi et al. (2024); Chen et al. (2024b); Weng et al. (2024). Meanwhile, the diverse and continuous visual content raises noise and distractors; thus, strategies to effectively locate or distill essential parts of the video are of primary importance Zhang et al. (2023); Cheng et al. (2024b); Xu et al. (2023); Ye et al. (2025b)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.574, + 0.827, + 0.727 + ], + "angle": 0, + "content": "Existing Solutions based on VLMs typically share three core ideas: 1) video sampling or retrieval for efficiency, 2) multi-stage or interactive reasoning to handle complex questions, and 3) compact representation to accommodate the VLM's limited context window. For instance, retrieval-based pipelines partition a video into segments and employ a learned or rule-based retriever to identify the relevant chunks before passing them to a VLM Pan et al. (2023); Choudhury et al. (2023, 2025). Other lines of research compress each frame into minimal tokens to reduce computational overhead Li et al. (2024); Chen et al. (2024a); Song et al. (2024), or adopt a streaming mechanism to propagate memory representations along the temporal axis Qian et al. (2024); Wu et al. (2022); Liu et al. (2024). Beyond these efficiency-oriented approaches, LLM/VLM-as-planner frameworks factorize the process into a series of perception queries, enabling an agent to fetch additional frame-level details if needed Wang et al. (2024b); Zhang et al. (2024); Liao et al. (2024)." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.743, + 0.303, + 0.759 + ], + "angle": 0, + "content": "6 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.773, + 0.827, + 0.913 + ], + "angle": 0, + "content": "In this paper, we present Visual Semantic-Logical Search (VSLS), a novel framework that efficiently selects semantically relevant keyframes for long video understanding by decomposing logical relationships between textual queries and visual elements. VSLS, built on four defined logical dependencies (spatial co-occurrence, temporal proximity, attribute dependency, and causal order), significantly outperforms existing methods while sampling only \\(1.4\\%\\) of video frames. The \\(8.7\\%\\) improvement in GPT-4O's long video QA accuracy demonstrates that query-guided visual semantic logic search effectively bridges the gap between textual queries and visual content. VSLS's plug-and-play nature enables seamless integration with existing pipelines, making it practical for real-world applications. Future work could consider richer logical relations, learnable search methods, enhanced interpretability, and additional downstream tasks." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.506, + 0.948 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.174, + 0.09, + 0.27, + 0.107 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.113, + 0.826, + 0.14 + ], + "angle": 0, + "content": "Dominique Brunet, Edward R. Vrscay, and Zhou Wang. On the mathematical properties of the structural similarity index. IEEE Transactions on Image Processing, 2012." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.147, + 0.827, + 0.174 + ], + "angle": 0, + "content": "Jieneng Chen, Luoxin Ye, Ju He, Zhao-Yang Wang, Daniel Khashabi, and Alan Yuille. Llavolta: Efficient multi-modal models via stage-wise visual context compression. arXiv preprint arXiv:2406.20092, 2024a." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.182, + 0.827, + 0.221 + ], + "angle": 0, + "content": "Jr-Jen Chen, Yu-Chien Liao, Hsi-Che Lin, Yu-Chu Yu, Yen-Chun Chen, and Yu-Chiang Frank Wang. ReXTime: A benchmark suite for reasoning-across-time in videos. In The Thirty-eighth Conference on Neural Information Processing Systems Datasets and Benchmarks Track, 2024b." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.229, + 0.827, + 0.268 + ], + "angle": 0, + "content": "Lin Chen, Xin Wei, Jinsong Li, Xiaoyi Dong, Pan Zhang, Yuhang Zang, Zehui Chen, Haodong Duan, Zhenyu Tang, Li Yuan, et al. Sharegpt4video: Improving video understanding and generation with better captions. NeurIPS, 37:19472-19495, 2024c." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.277, + 0.825, + 0.327 + ], + "angle": 0, + "content": "Zhe Chen, Jiannan Wu, Wenhai Wang, Weijie Su, Guo Chen, Sen Xing, Muyan Zhong, Qinglong Zhang, Xizhou Zhu, Lewei Lu, et al. Internvl: Scaling up vision foundation models and aligning for generic visual-linguistic tasks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 24185–24198, 2024d." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.336, + 0.825, + 0.363 + ], + "angle": 0, + "content": "Tianheng Cheng, Lin Song, Yixiao Ge, Wenyu Liu, Xinggang Wang, and Ying Shan. Yolo-world: Real-time open-vocabulary object detection. CVPR, 2024a."
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.371, + 0.826, + 0.41 + ], + "angle": 0, + "content": "Zesen Cheng, Sicong Leng, Hang Zhang, Yifei Xin, Xin Li, Guanzheng Chen, Yongxin Zhu, Wenqi Zhang, Ziyang Luo, Deli Zhao, and Lidong Bing. Videollama 2: Advancing spatial-temporal modeling and audio understanding in video-llms, 2024b." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.419, + 0.825, + 0.446 + ], + "angle": 0, + "content": "Rohan Choudhury, Koichiro Niinuma, Kris M Kitani, and Laszlo A Jeni. Zero-shot video question answering with procedural programs. arXiv preprint arXiv:2312.00937, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.453, + 0.825, + 0.481 + ], + "angle": 0, + "content": "Rohan Choudhury, Koichiro Niinuma, Kris M. Kitani, and László A. Jeni. Video question answering with procedural programs. In ECCV, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.488, + 0.48, + 0.502 + ], + "angle": 0, + "content": "David Cohen. Universals in linguistic theory, 1968." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.51, + 0.825, + 0.537 + ], + "angle": 0, + "content": "Yue Fan, Xiaojian Ma, Rujie Wu, Yuntao Du, Jiaqi Li, Zhi Gao, and Qing Li. Videoagent: A memory-augmented multimodal agent for video understanding. ArXiv, abs/2403.11481, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.545, + 0.773, + 0.56 + ], + "angle": 0, + "content": "Charles J Fillmore. The case for case. Bach and Harms (Ed.): Universals in Linguistic Theory, 1967." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.567, + 0.826, + 0.618 + ], + "angle": 0, + "content": "Chaoyou Fu, Yuhan Dai, Yondong Luo, Lei Li, Shuhuai Ren, Renrui Zhang, Zihan Wang, Chenyu Zhou, Yunhang Shen, Mengdan Zhang, Peixian Chen, Yanwei Li, Shaohui Lin, Sirui Zhao, Ke Li, Tong Xu, Xiawu Zheng, Enhong Chen, Rongrong Ji, and Xing Sun. Video-mme: The first-ever comprehensive evaluation benchmark of multi-modal llms in video analysis. ArXiv, abs/2405.21075, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.627, + 0.825, + 0.678 + ], + "angle": 0, + "content": "Kristen Grauman, Andrew Westbury, Eugene Byrne, Zachary Chavis, Antonino Furnari, Rohit Girdhar, Jackson Hamburger, Hao Jiang, Miao Liu, Xingyu Liu, et al. Ego4d: Around the world in 3,000 hours of egocentric video. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 18995-19012, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.687, + 0.825, + 0.714 + ], + "angle": 0, + "content": "Aaron Hurst, Adam Lerer, Adam P Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihinda, Alan Hayes, Alec Radford, et al. Gpt-4o system card. arXiv preprint arXiv:2410.21276, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.722, + 0.825, + 0.748 + ], + "angle": 0, + "content": "Minkuk Kim, Hyeon Bae Kim, Jinyoung Moon, Jinwoo Choi, and Seong Tae Kim. Do you remember? dense video captioning with cross-modal memory retrieval. In CVPR, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.756, + 0.825, + 0.782 + ], + "angle": 0, + "content": "Yanwei Li, Chengyao Wang, and Jiaya Jia. Llama-vid: An image is worth 2 tokens in large language models. In ECCV, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.791, + 0.825, + 0.818 + ], + "angle": 0, + "content": "Jianxin Liang, Xiaojun Meng, Yueqian Wang, Chang Liu, Qun Liu, and Dongyan Zhao. 
End-to-end video question answering with frame scoring mechanisms and adaptive sampling. ArXiv, abs/2407.15047, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.826, + 0.825, + 0.865 + ], + "angle": 0, + "content": "Ruotong Liao, Max Eler, Huiyu Wang, Guangyao Zhai, Gengyuan Zhang, Yunpu Ma, and Volker Tresp. Videoinsta: Zero-shot long video understanding via informative spatial-temporal reasoning with llms. In EMNLP Findings, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.873, + 0.825, + 0.913 + ], + "angle": 0, + "content": "Shilong Liu, Hao Cheng, Haotian Liu, Hao Zhang, Feng Li, Tianhe Ren, Xueyan Zou, Jianwei Yang, Hang Su, Jun Zhu, et al. Llava-plus: Learning to use tools for creating multimodal agents. In European Conference on Computer Vision, pages 126-142. Springer, 2024." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.113, + 0.827, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.092, + 0.826, + 0.12 + ], + "angle": 0, + "content": "William C Mann and Sandra A Thompson. Rhetorical structure theory: Toward a functional theory of text organization. Text-interdisciplinary Journal for the Study of Discourse, 8(3):243-281, 1988." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.128, + 0.825, + 0.155 + ], + "angle": 0, + "content": "Leland Gerson Neuberg. Causality: models, reasoning, and inference, by Judea Pearl, Cambridge University Press, 2000. Econometric Theory, 19(4):675-685, 2003." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.164, + 0.827, + 0.202 + ], + "angle": 0, + "content": "Junting Pan, Ziyi Lin, Yuying Ge, Xiatian Zhu, Renrui Zhang, Yi Wang, Yu Qiao, and Hongsheng Li. Retrieving-to-answer: Zero-shot video question answering with frozen large language models. In ICCV Workshops, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.212, + 0.827, + 0.251 + ], + "angle": 0, + "content": "Jong Sung Park, Kanchana Ranasinghe, Kumara Kahatapitiya, Wonjeong Ryoo, Donghyun Kim, and Michael S. Ryoo. Too many frames, not all useful: Efficient strategies for long-form video qa. ArXiv, abs/2406.09396, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.261, + 0.825, + 0.288 + ], + "angle": 0, + "content": "Rui Qian, Xiaoyi Dong, Pan Zhang, Yuhang Zang, Shuangrui Ding, Dahua Lin, and Jiaqi Wang. Streaming long video understanding with large language models. In NeurIPS, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.297, + 0.826, + 0.336 + ], + "angle": 0, + "content": "Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.345, + 0.824, + 0.372 + ], + "angle": 0, + "content": "Manjusha Rajan and Latha Parameswaran. Key frame extraction algorithm for surveillance videos using an evolutionary approach. Scientific Reports, 15(1):536, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.381, + 0.824, + 0.407 + ], + "angle": 0, + "content": "Kanchana Ranasinghe, Xiang Li, Kumara Kahatapitiya, and Michael S Ryoo. Understanding long videos with multimodal language models. In ICLR, 2025."
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.416, + 0.826, + 0.443 + ], + "angle": 0, + "content": "Yudi Shi, Shangzhe Di, Qirui Chen, and Weidi Xie. Unlocking video-llm via agent-of-thoughts distillation. arXiv preprint arXiv:2412.01694, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.452, + 0.827, + 0.49 + ], + "angle": 0, + "content": "Dingjie Song, Wenjun Wang, Shunian Chen, Xidong Wang, Michael Guan, and Benyou Wang. Less is more: A simple yet effective token reduction method for efficient multi-modal llms. arXiv preprint arXiv:2409.10994, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.5, + 0.825, + 0.527 + ], + "angle": 0, + "content": "John F. Sowa. Knowledge Representation: Logical, Philosophical, and Computational Foundations. Brooks/Cole Publishing Co., Pacific Grove, CA, USA, 2000." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.536, + 0.825, + 0.563 + ], + "angle": 0, + "content": "Leonard Talmy. Toward a Cognitive Semantics (Volume 1: Concept Structuring Systems; Volume 2: Typology and Process in Concept Structuring). MIT Press, Cambridge, MA, USA, 2000." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.572, + 0.826, + 0.599 + ], + "angle": 0, + "content": "Reuben Tan, Ximeng Sun, Ping Hu, Jui-hsien Wang, Hanieh Deilamsalehy, Bryan A. Plummer, Bryan Russell, and Kate Saenko. Koala: Key frame-conditioned long video-llm. CVPR, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.608, + 0.826, + 0.646 + ], + "angle": 0, + "content": "Yunlong Tang, Jing Bi, Siting Xu, Luchuan Song, Susan Liang, Teng Wang, Daoan Zhang, Jie An, Jingyang Lin, Rongyi Zhu, et al. Video understanding with large language models: A survey. arXiv preprint arXiv:2312.17432, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.656, + 0.825, + 0.695 + ], + "angle": 0, + "content": "Hengyi Wang, Haizhou Shi, Shiwei Tan, Weiyi Qin, Wenyuan Wang, Tunyu Zhang, Akshay Nambi, Tanuja Ganu, and Hao Wang. Multimodal needle in a haystack: Benchmarking long-context capability of multimodal large language models, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.704, + 0.825, + 0.731 + ], + "angle": 0, + "content": "Xiaohan Wang, Yuhui Zhang, Orr Zohar, and Serena Yeung-Levy. Videoagent: Long-form video understanding with large language model as agent. In ECCV, 2024a." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.74, + 0.825, + 0.767 + ], + "angle": 0, + "content": "Xiaohan Wang, Yuhui Zhang, Orr Zohar, and Serena Yeung-Levy. Videoagent: Long-form video understanding with large language model as agent. In ECCV, pages 58-76. Springer, 2024b." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.776, + 0.827, + 0.816 + ], + "angle": 0, + "content": "Zhanyu Wang, Longyue Wang, Zhen Zhao, Minghao Wu, Chenyang Lyu, Huayang Li, Deng Cai, Luping Zhou, Shuming Shi, and Zhaopeng Tu. Gpt4video: A unified multimodal large language model for instruction-followed understanding and safety-aware generation. In ACM MM, pages 3907-3916, 2024c." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.825, + 0.827, + 0.863 + ], + "angle": 0, + "content": "Ziyang Wang, Shoubin Yu, Elias Stengel-Eskin, Jaehong Yoon, Feng Cheng, Gedas Bertasius, and Mohit Bansal. Videotree: Adaptive tree-based video representation for llm reasoning on long videos. ArXiv, abs/2405.19209, 2024d."
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.873, + 0.825, + 0.911 + ], + "angle": 0, + "content": "Ziyang Wang, Shoubin Yu, Elias Stengel-Eskin, Jaehong Yoon, Feng Cheng, Gedas Bertasius, and Mohit Bansal. Videotree: Adaptive tree-based video representation for llm reasoning on long videos. arXiv preprint arXiv:2405.19209, 2024e." + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.092, + 0.827, + 0.911 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.092, + 0.826, + 0.133 + ], + "angle": 0, + "content": "Yuetian Weng, Mingfei Han, Haoyu He, Xiaojun Chang, and Bohan Zhuang. Longvlm: Efficient long video understanding via large language models. In European Conference on Computer Vision, pages 453-470. Springer, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.14, + 0.826, + 0.192 + ], + "angle": 0, + "content": "Chao-Yuan Wu, Yanghao Li, Karttikeya Mangalam, Haoqi Fan, Bo Xiong, Jitendra Malik, and Christoph Feichtenhofer. Memvit: Memory-augmented multiscale vision transformer for efficient long-term video recognition. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 13587-13597, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.2, + 0.827, + 0.216 + ], + "angle": 0, + "content": "Penghao Wu and Saining Xie. V*: Guided visual search as a core mechanism in multimodal llms. CVPR, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.223, + 0.825, + 0.25 + ], + "angle": 0, + "content": "Jiaqi Xu, Cuiling Lan, Wenxuan Xie, Xuejin Chen, and Yan Lu. Retrieval-based video language model for efficient long video question answering. arXiv preprint arXiv:2312.04931, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.258, + 0.825, + 0.285 + ], + "angle": 0, + "content": "Jinhui Ye, Zihan Wang, and Haosen Sun. Longvideohaystack. https://huggingface.co/datasets/LVHaystack/LongVideoHaystack, 2025a. v1.0." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.293, + 0.825, + 0.332 + ], + "angle": 0, + "content": "Jinhui Ye, Zihan Wang, Haosen Sun, Keshigeyan Chandrasegaran, Zane Durante, Cristobal Eyzaguirre, Yonatan Bisk, Juan Carlos Niebles, Ehsan Adeli, Li Fei-Fei, Jiajun Wu, and Manling Li. Re-thinking temporal search for long-form video understanding. In CVPR, 2025b." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.341, + 0.825, + 0.368 + ], + "angle": 0, + "content": "Shukang Yin, Chaoyou Fu, Sirui Zhao, Ke Li, Xing Sun, Tong Xu, and Enhong Chen. A survey on multimodal large language models. National Science Review, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.377, + 0.826, + 0.416 + ], + "angle": 0, + "content": "Sicheng Yu, Chengkai Jin, Huan Wang, Zhenghao Chen, Sheng Jin, Zhongrong Zuo, Xiaolei Xu, Zhenbang Sun, Bingni Zhang, Jiawei Wu, Hao Zhang, and Qianru Sun. Frame-voyager: Learning to query frames for video large language models. ArXiv, abs/2410.03226, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.424, + 0.825, + 0.451 + ], + "angle": 0, + "content": "Zhou Yu, Dejing Xu, Jun Yu, Ting Yu, Zhou Zhao, Yueting Zhuang, and Dacheng Tao. Activitynet-qa: A dataset for understanding complex web videos via question answering. In AAAI, 2019." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.459, + 0.826, + 0.499 + ], + "angle": 0, + "content": "Xiangyu Zeng, Kunchang Li, Chenting Wang, Xinhao Li, Tianxiang Jiang, Ziang Yan, Songze Li, Yansong Shi, Zhengrong Yue, Yi Wang, Yali Wang, Yu Qiao, and Limin Wang. Timesuite: Improving mllms for long video understanding via grounded tuning, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.507, + 0.825, + 0.534 + ], + "angle": 0, + "content": "Hang Zhang, Xin Li, and Lidong Bing. Video-llama: An instruction-tuned audio-visual language model for video understanding. In EMNLP, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.542, + 0.825, + 0.569 + ], + "angle": 0, + "content": "Lu Zhang, Tiancheng Zhao, Heting Ying, Yibo Ma, and Kyusong Lee. OmAgent: A multi-modal agent framework for complex video understanding with task divide-and-conquer. In EMNLP, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.578, + 0.826, + 0.616 + ], + "angle": 0, + "content": "Zijia Zhao, Haoyu Lu, Yuqi Huo, Yifan Du, Tongtian Yue, Longteng Guo, Bingning Wang, Weipeng Chen, and Jing Liu. Needle in a video haystack: A scalable synthetic evaluator for video mllms. arXiv preprint arXiv:2406.09367, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.625, + 0.826, + 0.665 + ], + "angle": 0, + "content": "Heqing Zou, Tianze Luo, Guiyang Xie, Fengmao Lv, Guangcong Wang, Junyang Chen, Zhuochen Wang, Hansheng Zhang, Huajian Zhang, et al. From seconds to hours: Reviewing multimodal large language models on comprehensive long video understanding. arXiv preprint arXiv:2409.18938, 2024." + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.092, + 0.827, + 0.665 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.172, + 0.088, + 0.237, + 0.105 + ], + "angle": 0, + "content": "Part I" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.118, + 0.316, + 0.147 + ], + "angle": 0, + "content": "Appendix" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.171, + 0.354, + 0.19 + ], + "angle": 0, + "content": "Table of Contents" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.198, + 0.786, + 0.212 + ], + "angle": 0, + "content": "A Theoretical Underpinnings of Relation Categories 14" + }, + { + "type": "text", + "bbox": [ + 0.235, + 0.214, + 0.786, + 0.227 + ], + "angle": 0, + "content": "A.1 Linguistic Grounding 14" + }, + { + "type": "text", + "bbox": [ + 0.235, + 0.229, + 0.786, + 0.242 + ], + "angle": 0, + "content": "A.2 Logical Grounding 14" + }, + { + "type": "text", + "bbox": [ + 0.235, + 0.244, + 0.786, + 0.257 + ], + "angle": 0, + "content": "A.3 Pragmatic Completeness for VQA 14" + }, + { + "type": "list", + "bbox": [ + 0.235, + 0.214, + 0.786, + 0.257 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.27, + 0.786, + 0.283 + ], + "angle": 0, + "content": "B Performance 15" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.296, + 0.786, + 0.31 + ], + "angle": 0, + "content": "C Analysis of the Impact of Search Frame Count 15" + }, + { + "type": "list", + "bbox": [ + 0.214, + 0.27, + 0.786, + 0.31 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.323, + 0.786, + 0.336 + ], + "angle": 0, + "content": "D Details of Datasets 16" + }, + { + "type": "text", + "bbox": [ + 0.235, + 0.338, + 0.786, + 0.351 + ], + "angle": 0, + "content": 
"D.1 Details ofVIDEO-MME 16" + }, + { + "type": "text", + "bbox": [ + 0.235, + 0.353, + 0.786, + 0.366 + ], + "angle": 0, + "content": "D.2 Details of LONGVIDEOBENCH 16" + }, + { + "type": "text", + "bbox": [ + 0.235, + 0.368, + 0.786, + 0.381 + ], + "angle": 0, + "content": "D.3 Details of LV-HAYSTACK 16" + }, + { + "type": "text", + "bbox": [ + 0.235, + 0.383, + 0.786, + 0.396 + ], + "angle": 0, + "content": "D.4 Details of EGO-4D 17" + }, + { + "type": "list", + "bbox": [ + 0.235, + 0.338, + 0.786, + 0.396 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.41, + 0.786, + 0.423 + ], + "angle": 0, + "content": "E Detailed Algorithm 17" + }, + { + "type": "text", + "bbox": [ + 0.235, + 0.425, + 0.786, + 0.438 + ], + "angle": 0, + "content": "E.1 Algorithm Overview and Core Components 17" + }, + { + "type": "text", + "bbox": [ + 0.235, + 0.44, + 0.786, + 0.453 + ], + "angle": 0, + "content": "E.2 Implementation Considerations 19" + }, + { + "type": "text", + "bbox": [ + 0.235, + 0.456, + 0.786, + 0.469 + ], + "angle": 0, + "content": "E.3 Computational Complexity Analysis 19" + }, + { + "type": "text", + "bbox": [ + 0.235, + 0.471, + 0.786, + 0.483 + ], + "angle": 0, + "content": "E.4 Technical Implementation Details 19" + }, + { + "type": "text", + "bbox": [ + 0.235, + 0.486, + 0.786, + 0.499 + ], + "angle": 0, + "content": "E.5 Practical Application Examples 21" + }, + { + "type": "text", + "bbox": [ + 0.235, + 0.501, + 0.786, + 0.514 + ], + "angle": 0, + "content": "E.6 System Specifications for Reproductivity 21" + }, + { + "type": "list", + "bbox": [ + 0.235, + 0.425, + 0.786, + 0.514 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.526, + 0.786, + 0.54 + ], + "angle": 0, + "content": "F Case Study of VSLS Keyframe Selection 21" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.553, + 0.786, + 0.567 + ], + "angle": 0, + "content": "G Iteration Analysis 22" + }, + { + "type": "list", + "bbox": [ + 0.214, + 0.526, + 0.786, + 0.567 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.58, + 0.786, + 0.593 + ], + "angle": 0, + "content": "H Prompt 23" + }, + { + "type": "text", + "bbox": [ + 0.235, + 0.595, + 0.786, + 0.608 + ], + "angle": 0, + "content": "H.1 Prompt Template for Query Grounding 23" + }, + { + "type": "text", + "bbox": [ + 0.235, + 0.61, + 0.786, + 0.624 + ], + "angle": 0, + "content": "H.2 Prompt Template for Question Answering 23" + }, + { + "type": "list", + "bbox": [ + 0.235, + 0.595, + 0.786, + 0.624 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.636, + 0.786, + 0.649 + ], + "angle": 0, + "content": "I Limitations 24" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.663, + 0.786, + 0.676 + ], + "angle": 0, + "content": "J Broader Impacts 24" + }, + { + "type": "text", + "bbox": [ + 0.235, + 0.678, + 0.786, + 0.691 + ], + "angle": 0, + "content": "J.1 Positive Impacts 24" + }, + { + "type": "text", + "bbox": [ + 0.235, + 0.693, + 0.786, + 0.706 + ], + "angle": 0, + "content": "J.2 Potential Considerations 24" + }, + { + "type": "list", + "bbox": [ + 0.235, + 0.678, + 0.786, + 0.706 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.172, + 0.09, + 0.628, + 0.108 + ], + "angle": 0, + "content": "A Theoretical Underpinnings of Relation Categories" + }, + { + 
"type": "text", + "bbox": [ + 0.171, + 0.122, + 0.825, + 0.179 + ], + "angle": 0, + "content": "Our choice of the four relation categories—spatial, temporal, attribute, and causal—is grounded in foundational concepts from linguistics and logic. While achieving absolute “completeness” in describing the infinite complexity of the real world is a formidable challenge, this selection aims to describe core aspects of events, states, and the way humans conceptualize and communicate them." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.195, + 0.37, + 0.21 + ], + "angle": 0, + "content": "A.1 Linguistic Grounding" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.216, + 0.825, + 0.244 + ], + "angle": 0, + "content": "Semantic Roles and Case Grammar: Theories like Fillmore's Case Grammar Fillmore (1967) analyze sentences in terms of semantic roles that nominals play in relation to the verb (the event)." + }, + { + "type": "text", + "bbox": [ + 0.173, + 0.246, + 0.826, + 0.273 + ], + "angle": 0, + "content": "- Spatial relations directly correspond to roles like Locative (the location of an event or state) or Path (the trajectory of motion)." + }, + { + "type": "text", + "bbox": [ + 0.173, + 0.275, + 0.814, + 0.288 + ], + "angle": 0, + "content": "- Temporal relations align with Temporal roles, specifying when an event occurs or its duration." + }, + { + "type": "text", + "bbox": [ + 0.173, + 0.29, + 0.826, + 0.33 + ], + "angle": 0, + "content": "- Attributes describe the properties of entities (participants) involved in these roles. While not direct case roles for verbs, they are fundamental for identifying and characterizing the \"who\" and \"what\" (e.g., Agent, Patient, Theme, Instrument) that possess these attributes during an event." + }, + { + "type": "text", + "bbox": [ + 0.173, + 0.332, + 0.825, + 0.373 + ], + "angle": 0, + "content": "- Causal relations are central to understanding agency and event structure. Roles like Agent (the instigator of an action) or Cause (the non-volitional trigger of an event) highlight the importance of causality in linguistic descriptions of events." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.246, + 0.826, + 0.373 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.379, + 0.827, + 0.476 + ], + "angle": 0, + "content": "Lexical Semantics and Event Structure: Works in lexical semantics (e.g., following Pustejovsky Cohen (1968) on the generative lexicon, or Talmy Talmy (2000) on cognitive semantics) often decompose event meaning into fundamental components. Talmy Talmy (2000), for instance, extensively discusses how language structures concepts like space, time, and force dynamics (which inherently relate to causality). Events are situated in space and time, involve entities with specific attributes, and are often linked through causal chains (e.g., one action causing another, or an agent causing a change of state)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.481, + 0.826, + 0.58 + ], + "angle": 0, + "content": "Discourse Relations: Theories like Rhetorical Structure Theory (RST) Mann and Thompson (1988) identify relations that bind textual units together. Many of these fundamental relations are inherently temporal (e.g., Sequence), causal (e.g., Cause, Result, Purpose), or involve describing entities and their settings (which encompasses spatial and attributive information, often under relations like Elaboration or Background). 
This suggests that these four categories capture essential elements for constructing coherent descriptions and explanations, a core function of Video Question Answering (VQA)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.596, + 0.351, + 0.611 + ], + "angle": 0, + "content": "A.2 Logical Grounding" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.617, + 0.827, + 0.702 + ], + "angle": 0, + "content": "Predicate Logic and Knowledge Representation: In formal logic and AI knowledge representation (e.g., Sowa Sowa (2000)), events and states are often represented using predicates with arguments that specify participants, locations, times, and properties. A typical event representation might implicitly or explicitly include Location(event, place), Time(event, time_interval), HasProperty(event, attribute_value), and relations like Causes(event1, event2). Our four categories provide a high-level abstraction over these common predicate types." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.706, + 0.827, + 0.734 + ], + "angle": 0, + "content": "Modal and Specialized Logics: Temporal Logic is specifically designed to reason about propositions qualified in terms of time." + }, + { + "type": "text", + "bbox": [ + 0.173, + 0.735, + 0.78, + 0.749 + ], + "angle": 0, + "content": "- Spatial Logic deals with reasoning about spatial properties and relations between entities." + }, + { + "type": "text", + "bbox": [ + 0.173, + 0.75, + 0.825, + 0.791 + ], + "angle": 0, + "content": "- Logics of Action and Causality (e.g., situation calculus, event calculus, or Pearl's work on causality Neuberg (2003)) explicitly model how actions bring about changes and the causal dependencies between events." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.735, + 0.825, + 0.791 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.809, + 0.455, + 0.824 + ], + "angle": 0, + "content": "A.3 Pragmatic Completeness for VQA" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.835, + 0.827, + 0.863 + ], + "angle": 0, + "content": "From a pragmatic standpoint, particularly for VQA, these four relations address the core \"Wh-questions\" humans often ask to understand a scene or event:" + }, + { + "type": "text", + "bbox": [ + 0.173, + 0.867, + 0.713, + 0.882 + ], + "angle": 0, + "content": "- What/Who? (Identifies objects/entities, often distinguished by their attributes)" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.883, + 0.461, + 0.897 + ], + "angle": 0, + "content": "- Where? (Answered by spatial relations)" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.898, + 0.471, + 0.912 + ], + "angle": 0, + "content": "- When? (Answered by temporal relations)" + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.867, + 0.713, + 0.912 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.173, + 0.092, + 0.825, + 0.121 + ], + "angle": 0, + "content": "- Why/How did it happen? 
(Often answered by causal relations or a sequence of events linked temporally and spatially)" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.125, + 0.825, + 0.196 + ], + "angle": 0, + "content": "While more fine-grained relations (as in Action Genome) undoubtedly provide deeper semantic detail, our chosen set aims to provide a foundational, yet computationally manageable, framework for keyframe selection based on the most common semantic and logical inferences required for a broad range of video queries. They represent a level of abstraction that is both meaningful for human queries and feasible for current visual-language models to parse and verify." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.2, + 0.825, + 0.243 + ], + "angle": 0, + "content": "In essence, these categories are not arbitrary but reflect fundamental dimensions along which events and states are structured, perceived, and communicated in language and reasoned about in logic. We believe they offer a robust and broadly applicable framework for the task at hand." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.263, + 0.318, + 0.279 + ], + "angle": 0, + "content": "B Performance" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.294, + 0.827, + 0.392 + ], + "angle": 0, + "content": "Long-form video understanding presents unique challenges due to the complexity of temporal dynamics and cross-modal interactions in extended durations (900-3,600 seconds). Our comprehensive evaluation on the LVB-XL benchmark reveals significant performance gaps between existing approaches. While large-scale models like GPT-4O (32 frames) and INTERNVL 2.5-78B (16 frames) have demonstrated competence in short-video tasks, their direct application to long-form content (marked by circle sizes proportional to model parameters) yields suboptimal results (53.8% and 56.5% accuracy respectively)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.397, + 0.828, + 0.482 + ], + "angle": 0, + "content": "Our Visual Semantic-Logical Search (VSLS) framework addresses these limitations. This advancement enables consistent performance improvements across different architecture scales, elevating GPT-4O to \\(54.2\\%\\) \\((+0.4\\mathrm{pp})\\) and achieving a remarkable \\(62.4\\%\\) \\((+5.9\\mathrm{pp})\\) for INTERNVL 2.5-78B on this benchmark. The comparative analysis further suggests that VSLS's gains become particularly pronounced when processing longer visual sequences, highlighting its effectiveness in modeling extended temporal contexts." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.501, + 0.6, + 0.518 + ], + "angle": 0, + "content": "C Analysis of the Impact of Search Frame Count" + }, + { + "type": "image", + "bbox": [ + 0.258, + 0.541, + 0.744, + 0.782 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.791, + 0.825, + 0.82 + ], + "angle": 0, + "content": "Figure 5: Performance improvement with increasing search frames. VSLS consistently enhances accuracy and reaches near-human oracle performance at 64 frames." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.836, + 0.825, + 0.864 + ], + "angle": 0, + "content": "This section investigates the impact of the number of search frames on the performance of Vision-Language Models (VLMs) in the context of LONGVIDEOBENCH."
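+ }, + { + "type": "text", + "content": "For concreteness, a minimal sketch of the frame-budget sweep behind Figure 5 is shown below; vsls_select and evaluate_qa are hypothetical helpers (keyframe selection and QA scoring, respectively), so this is an outline of the evaluation protocol rather than the actual harness." + }, + { + "type": "code", + "content": "def sweep_frame_budget(benchmark, vsls_select, evaluate_qa,
                       budgets=(2, 4, 8, 16, 32, 64)):
    # For each search-frame budget K, select K keyframes per video with
    # VSLS and score the downstream VLM's answers on the benchmark.
    results = {}
    for k in budgets:
        frames = {vid: vsls_select(vid, K=k) for vid in benchmark.videos}
        results[k] = evaluate_qa(benchmark, frames)
    return results  # accuracy per budget, as plotted in Figure 5"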
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.87, + 0.825, + 0.913 + ], + "angle": 0, + "content": "Figure 5 in the T* framework study empirically demonstrates the non-monotonic relationship between input frame quantity and model accuracy on the LONGVIDEOBENCH XL benchmark. Through systematic experimentation across 18 state-of-the-art VLMs, this visualization reveals a critical" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.172, + 0.092, + 0.825, + 0.12 + ], + "angle": 0, + "content": "phenomenon: excessive frame inputs degrade performance for models lacking temporal redundancy mitigation mechanisms." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.143, + 0.367, + 0.16 + ], + "angle": 0, + "content": "D Details of Datasets" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.176, + 0.387, + 0.19 + ], + "angle": 0, + "content": "D.1 Details of Video-MME" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.203, + 0.828, + 0.329 + ], + "angle": 0, + "content": "The Video-MME (Video Multi-Modal Evaluation) dataset represents the first comprehensive benchmark tailored to assess the capabilities of Vision-Language Models (VLMs) in video understanding. Aiming to address limitations in existing benchmarks, it emphasizes diversity, temporal complexity, and multi-modal integration while ensuring high-quality human annotations. The dataset contains 900 carefully curated videos across six primary domains—Knowledge, Film and Television, Sports Competition, Artistic Performance, Life Record, and Multilingual—with 30 fine-grained subcategories such as astronomy, esports, and documentaries. These videos vary significantly in duration, ranging from short clips (11 seconds) to long-form content (up to 1 hour), enabling robust evaluation across temporal scales." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.334, + 0.828, + 0.432 + ], + "angle": 0, + "content": "Each video is paired with expert-annotated multiple-choice questions (2,700 QA pairs in total), rigorously validated to ensure clarity and reliance on visual or multi-modal context. Questions span 12 task types, including action recognition, temporal reasoning, and domain-specific knowledge, with a focus on scenarios where answers cannot be inferred from text alone. To quantify temporal complexity, the dataset introduces certificate length analysis, revealing that answering questions often requires understanding extended video segments (e.g., median lengths of 26 seconds for short videos and 890.7 seconds for long videos), surpassing the demands of prior benchmarks like EGOSchema." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.437, + 0.828, + 0.507 + ], + "angle": 0, + "content": "VIDEO-MME serves as a universal benchmark, applicable to both image- and video-focused MLLMs, and exposes key challenges for future research. These include improving architectures for long-sequence processing, developing datasets for complex temporal reasoning, and enhancing cross-modal alignment. By providing a rigorous evaluation framework, VIDEO-MME aims to drive progress toward MLLMs capable of understanding dynamic, real-world scenarios."
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.527, + 0.433, + 0.541 + ], + "angle": 0, + "content": "D.2 Details of LONGVIDEOBENCH" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.554, + 0.828, + 0.679 + ], + "angle": 0, + "content": "The LONGVIDEOBENCH benchmark pioneers the evaluation of long-context interleaved video-language understanding in VLMs, addressing critical gaps in existing benchmarks through its focus on detailed retrieval and temporal reasoning over hour-long multimodal inputs. Designed to overcome the \"single-frame bias\" prevalent in prior video benchmarks, the novel referring reasoning paradigm enables models to locate and analyze specific contexts within extended sequences. The dataset comprises 3,763 web-sourced videos that span various themes - movies, news, life vlogs, and knowledge domains (including art, history, and STEM) - with durations progressively grouped into four levels: 8-15 seconds, 15-60 seconds, 3-10 minutes, and 15-60 minutes. Each video is paired with aligned subtitles, forming interleaved multimodal inputs that mimic real-world viewing scenarios." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.685, + 0.828, + 0.797 + ], + "angle": 0, + "content": "The benchmark features 6,678 human-annotated multiple-choice questions categorized into 17 fine-grained task types across two levels: Perception (requiring object/attribute recognition in single scenes) and Relation (demanding temporal/causal reasoning across multiple scenes). Questions incorporate explicit referring queries (e.g., \"When the woman descends the rocky hill...\") that anchor reasoning to specific video moments, with an average question length of 43.5 words to ensure precision. Temporal complexity is quantified through duration-grouped analysis, where models must process up to 256 frames (at 1 fps) for hour-long videos, significantly exceeding the demands of predecessors like EGOSchema (180s videos)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.816, + 0.395, + 0.83 + ], + "angle": 0, + "content": "D.3 Details of LV-HAYSTACK" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.843, + 0.828, + 0.913 + ], + "angle": 0, + "content": "The LV-HAYSTACK benchmark establishes the first comprehensive evaluation framework for temporal search in long-form video understanding, addressing critical limitations in existing synthetic needle-in-haystack benchmarks through real-world video annotations and multi-dimensional evaluation metrics. Designed to assess models' ability to locate minimal keyframe sets (typically 1-5 frames) from hour-long videos containing tens of thousands of frames, the dataset comprises 3,874 human-annotated" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.51, + 0.948 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.825, + 0.121 + ], + "angle": 0, + "content": "instances spanning 150 hours of video content across two distinct categories: egocentric videos from EGO4D (101 hours) and allocentric videos from LONGVIDEOBENCH (57.7 hours)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.127, + 0.825, + 0.168 + ], + "angle": 0, + "content": "Organized into HAYSTACK-EGO4D and HAYSTACK-LVBENCH subsets, the benchmark features videos averaging 24.8 minutes in length (max 60 minutes) with 44,717 frames per video.
Each instance contains:" + }, + { + "type": "text", + "bbox": [ + 0.173, + 0.173, + 0.788, + 0.188 + ], + "angle": 0, + "content": "- Expert-curated multi-choice questions requiring temporal reasoning (15.9 questions/video);" + }, + { + "type": "text", + "bbox": [ + 0.173, + 0.188, + 0.825, + 0.216 + ], + "angle": 0, + "content": "- Human-annotated keyframe sets (4.7 frames/question for egocentric, 1.8 frames/question for allocentric);" + }, + { + "type": "text", + "bbox": [ + 0.173, + 0.217, + 0.636, + 0.232 + ], + "angle": 0, + "content": "- Temporal and visual similarity metrics for precise search evaluation." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.173, + 0.825, + 0.232 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.252, + 0.348, + 0.266 + ], + "angle": 0, + "content": "D.4 Details of EGO-4D" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.279, + 0.827, + 0.391 + ], + "angle": 0, + "content": "The EGO4D (Egocentric Computer Vision Benchmark) dataset establishes a transformative foundation for advancing research in first-person visual perception through unprecedented scale, diversity, and multi-modal integration. Designed to overcome limitations in existing egocentric datasets, it captures 3,670 hours of unscripted daily activities from 931 participants across 74 global locations and 9 countries, spanning household, workplace, leisure, and outdoor scenarios. The dataset features \\(30+\\) fine-grained activity categories including carpentry, social gaming, and meal preparation, with videos ranging from brief interactions (8-minute clips) to extended continuous recordings (up to 10 hours), enabling comprehensive analysis of long-term behavioral patterns." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.397, + 0.827, + 0.467 + ], + "angle": 0, + "content": "Each video is enriched with multi-modal annotations totaling 3.85 million dense textual narrations (13.2 sentences/minute), coupled with 3D environment meshes, eye gaze tracking, stereo vision, and synchronized multi-camera views. Rigorous privacy protocols ensure ethical data collection, with 612 hours containing unblurred faces/audio for social interaction studies. The benchmark suite introduces five core tasks organized across temporal dimensions:" + }, + { + "type": "text", + "bbox": [ + 0.173, + 0.471, + 0.825, + 0.498 + ], + "angle": 0, + "content": "- Episodic Memory: Temporal localization of natural language queries (74K instances) and 3D object tracking using Matterport scans;" + }, + { + "type": "text", + "bbox": [ + 0.173, + 0.5, + 0.826, + 0.528 + ], + "angle": 0, + "content": "- Hand-Object Interaction: State change detection (1.3M annotations) with PNR (point-of-no-return) temporal localization;" + }, + { + "type": "text", + "bbox": [ + 0.173, + 0.529, + 0.825, + 0.556 + ], + "angle": 0, + "content": "- Social Understanding: Audio-visual diarisation (2,535h audio) and gaze-directed communication analysis;" + }, + { + "type": "text", + "bbox": [ + 0.173, + 0.558, + 0.741, + 0.572 + ], + "angle": 0, + "content": "- Action Forecasting: Anticipation of locomotion trajectories and object interactions."
+ }, + { + "type": "list", + "bbox": [ + 0.173, + 0.471, + 0.826, + 0.572 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.577, + 0.827, + 0.661 + ], + "angle": 0, + "content": "Quantitative analysis reveals the dataset's complexity: hand-object interactions involve 1,772 unique verbs and 4,336 nouns, while social scenarios contain 6.8 participant interactions per minute on average. Multi-modal fusion experiments demonstrate performance gains, with 3D environment context improving object localization accuracy by \\(18.7\\%\\) compared to RGB-only baselines. State-of-the-art models achieve \\(68.9\\%\\) accuracy in action anticipation tasks, yet struggle with long-term forecasting (41.2% accuracy for 5s predictions), highlighting critical challenges in temporal reasoning." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.667, + 0.827, + 0.765 + ], + "angle": 0, + "content": "EGO4D's unique integration of egocentric video with complementary modalities (IMU data in 836h, gaze tracking in 45h) enables novel research directions in embodied AI and augmented reality. The dataset exposes fundamental limitations in current architectures, particularly in processing hour-long video contexts and synthesizing cross-modal signals—only \\(23\\%\\) of tested models effectively utilized audio-visual synchronization cues. By providing standardized evaluation protocols and curated challenge subsets, EGO4D serves as a universal testbed for developing perceptive systems capable of understanding persistent 3D environments and complex human behaviors." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.788, + 0.371, + 0.804 + ], + "angle": 0, + "content": "E Detailed Algorithm" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.822, + 0.562, + 0.836 + ], + "angle": 0, + "content": "The detailed VSLS algorithm is presented in Algorithm 2." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.857, + 0.521, + 0.872 + ], + "angle": 0, + "content": "E.1 Algorithm Overview and Core Components" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.884, + 0.825, + 0.913 + ], + "angle": 0, + "content": "The algorithm operates as an adaptive search framework that intelligently explores video content (represented as set \\(V\\)) to locate frames matching semantic-logical query requirements \\((Q)\\). Unlike" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "code_caption", + "bbox": [ + 0.174, + 0.095, + 0.582, + 0.11 + ], + "angle": 0, + "content": "Algorithm 2: The complete Visual Semantic-Logical Search" + }, + { + "type": "algorithm", + "bbox": [ + 0.174, + 0.112, + 0.825, + 0.449 + ], + "angle": 0, + "content": "Function SemanticLogicalTemporalSearch(V, Q, K, \\(\\Delta_t, \\tau, \\alpha, \\gamma\\)):\n\\(\\mathcal{O},\\mathcal{R}\\gets\\) ParseQuestion(Q); // Extract key/cue objects and relationships\n\\(P\\leftarrow\\) Uniform, \\(B\\leftarrow |V|, S\\leftarrow \\emptyset, N_{v}\\leftarrow |V|\\); // Initialize distribution and state\nwhile \\(B > 0\\) and \\(|\\mathcal{O}| > 0\\) do\n  \\(k\\gets \\lfloor \\sqrt{B}\\rfloor\\), \\(G\\gets\\) Grid(Sample\\((P, k^2)\\)); // Adaptive grid sampling\n  \\(\\Omega\\gets\\) DetectObjects(G); // Detect objects in sampled frames\n  foreach \\(g\\in G\\) do\n    \\(C_g\\gets\\) CalculateBaseScore(\\(\\Omega[g]\\)); // Base detection confidence\n    foreach \\(r\\in \\mathcal{R}\\) do\n      if r.type \\(=\\) Spatial then \\(C_g\\gets C_g + \\alpha \\gamma_{\\mathrm{spatial}}\\cdot\\) CheckSpatialRelationship(r, \\(\\Omega[g]\\))\n      else if r.type \\(=\\) Temporal then \\(C_g\\gets C_g + \\alpha \\gamma_{\\mathrm{time}}\\cdot\\) CheckTemporalRelationship(r, \\(\\Omega, \\Delta_t\\))\n      else if r.type \\(=\\) Causal then \\(C_g\\gets C_g + \\alpha \\gamma_{\\mathrm{causal}}\\cdot\\) CheckCausalRelationship(r, \\(\\Omega\\))\n      else if r.type \\(=\\) Attribute then \\(C_g\\gets C_g + \\alpha \\gamma_{\\mathrm{attr}}\\cdot\\) CheckAttributeRelationship(r, \\(\\Omega[g], \\tau\\))\n    UpdateScores(S, g, \\(C_g\\)); // Update global score registry\n  DiffuseScores(S, w); // Temporal context propagation\n  \\(P\\gets\\) NormalizeDistribution(S), \\(B\\gets B - k^{2}\\); // Update sampling distribution\n  foreach \\(g\\in \\operatorname{TopK}(S, K)\\) do\n    if \\(\\Omega[g]\\cap \\mathcal{O}\\neq \\emptyset\\) then \\(\\mathcal{O}\\leftarrow \\mathcal{O}\\setminus \\Omega[g]\\); // Remove identified key objects\nreturn TopK(S, K); // Return top-K keyframes" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.489, + 0.825, + 0.518 + ], + "angle": 0, + "content": "traditional linear search methods, it employs a probabilistic sampling strategy that dynamically adjusts based on confidence scores from multiple relationship types." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.539, + 0.825, + 0.568 + ], + "angle": 0, + "content": "Initialization Phase The process begins by parsing the input query \\( Q \\) into two fundamental components:" + }, + { + "type": "text", + "bbox": [ + 0.173, + 0.573, + 0.486, + 0.587 + ], + "angle": 0, + "content": "- \\(\\mathcal{O}\\): A set of key objects or entities to identify" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.588, + 0.805, + 0.603 + ], + "angle": 0, + "content": "- \\(\\mathcal{R}\\): A collection of relationships (spatial, temporal, causal, and attribute) that must be satisfied" + }, + { + "type": "list", + "bbox": [ + 0.172, + 0.573, + 0.805, + 0.603 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.607, + 0.827, + 0.663 + ], + "angle": 0, + "content": "The algorithm initializes with a uniform probability distribution \\((P)\\) across all video frames, establishing a budget \\((B)\\) equivalent to the total number of frames \\((|V|)\\), and creating an empty score registry \\((S)\\) to track confidence values. This approach ensures unbiased initial exploration before evidence-guided refinement." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.685, + 0.828, + 0.757 + ], + "angle": 0, + "content": "Adaptive Sampling Strategy Rather than exhaustively processing every frame, the algorithm employs a square-root scaling sampling strategy where \\( k = \\lfloor \\sqrt{B} \\rfloor \\) determines the sampling density.
This provides a mathematical balance between exploration breadth and computational efficiency. The Grid function organizes sampled frames into a structured representation that preserves spatial-temporal relationships, facilitating subsequent relationship analysis." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.778, + 0.826, + 0.835 + ], + "angle": 0, + "content": "Multi-modal Object Detection The DetectObjects function applies state-of-the-art computer vision techniques to identify objects within each sampled frame. This step leverages deep neural networks pre-trained on diverse visual datasets, enabling recognition of a wide range of entities with their corresponding confidence scores and spatial locations within frames." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.856, + 0.825, + 0.913 + ], + "angle": 0, + "content": "Score Propagation and Distribution Update The DiffuseScores function implements a temporal context propagation mechanism that spreads confidence values to neighboring frames, acknowledging that relevant content likely extends beyond individual frames. This diffusion creates a smoothed confidence landscape that guides subsequent sampling." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.825, + 0.135 + ], + "angle": 0, + "content": "After each iteration, the algorithm normalizes the accumulated scores to form an updated probability distribution, focusing future sampling on promising regions while maintaining exploration potential in unexamined areas." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.149, + 0.682, + 0.163 + ], + "angle": 0, + "content": "Convergence Criteria and Termination The search continues until either:" + }, + { + "type": "text", + "bbox": [ + 0.173, + 0.168, + 0.812, + 0.182 + ], + "angle": 0, + "content": "- The sampling budget \\((B)\\) is exhausted, indicating comprehensive coverage of the video content" + }, + { + "type": "text", + "bbox": [ + 0.173, + 0.184, + 0.761, + 0.198 + ], + "angle": 0, + "content": "- All target objects \\((\\mathcal{O})\\) have been successfully identified at satisfactory confidence levels" + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.168, + 0.812, + 0.198 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.203, + 0.825, + 0.232 + ], + "angle": 0, + "content": "This dual-termination approach balances thoroughness with efficiency, preventing unnecessary computation once objectives are met." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.246, + 0.825, + 0.302 + ], + "angle": 0, + "content": "Result Generation The algorithm concludes by returning the top-K frames with the highest confidence scores, representing the most relevant video segments that satisfy the semantic-logical query requirements. These keyframes provide a concise summary of the content matching the user's information needs." 
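+ }, + { + "type": "text", + "content": "To ground the E.1 walkthrough, the following Python sketch re-implements the control flow of Algorithm 2 under simplifying assumptions: detect_objects and check_relation are hypothetical stand-ins for the YOLO-WORLD detector and the four relationship checkers, frames are addressed by integer index, and all scores are plain floats. It illustrates the sampling, scoring, diffusion, and termination logic only; it is not the authors' implementation." + }, + { + "type": "code", + "content": "import numpy as np

def vsls_search(num_frames, detect_objects, check_relation,
                key_objects, relations, K=8, alpha=0.5, gamma=None):
    # Minimal sketch of Algorithm 2. detect_objects(i) -> set of labels in
    # frame i; check_relation(rel, dets) -> score in [0, 1]. Both are
    # assumed helpers standing in for the paper's components.
    gamma = gamma or {'spatial': 1.0, 'temporal': 1.0,
                      'causal': 1.0, 'attribute': 1.0}
    P = np.full(num_frames, 1.0 / num_frames)  # uniform prior over frames
    S = np.zeros(num_frames)                   # global score registry
    B = num_frames                             # sampling budget
    remaining = set(key_objects)
    while B > 0 and remaining:
        k = max(1, int(np.sqrt(B)))            # adaptive grid size, k = floor(sqrt(B))
        idx = np.random.choice(num_frames, size=min(k * k, num_frames),
                               replace=False, p=P)
        for g in idx:
            dets = detect_objects(g)
            c = float(len(dets & remaining))   # toy base detection confidence
            for r in relations:                # relation-weighted bonus terms
                c += alpha * gamma[r['type']] * check_relation(r, dets)
            S[g] = max(S[g], c)
            remaining -= dets                  # drop key objects already found
        # temporal diffusion: smooth scores so neighbours of hits get sampled
        kernel = np.exp(-0.5 * (np.arange(-5, 6) / 2.0) ** 2)
        S = np.convolve(S, kernel / kernel.sum(), mode='same')
        P = (S + 1e-6) / (S + 1e-6).sum()      # renormalise sampling distribution
        B -= k * k
    return np.argsort(S)[-K:][::-1]            # indices of top-K keyframes"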
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.318, + 0.438, + 0.334 + ], + "angle": 0, + "content": "E.2 Implementation Considerations" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.345, + 0.66, + 0.36 + ], + "angle": 0, + "content": "The algorithm's performance depends on several configurable parameters:" + }, + { + "type": "text", + "bbox": [ + 0.173, + 0.363, + 0.531, + 0.378 + ], + "angle": 0, + "content": "- \\(\\Delta_{t}\\): Temporal window size for relationship analysis" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.379, + 0.497, + 0.393 + ], + "angle": 0, + "content": "- \\(\\tau\\): Confidence threshold for attribute matching" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.394, + 0.444, + 0.408 + ], + "angle": 0, + "content": "- \\(\\alpha\\): Global relationship influence factor" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.409, + 0.435, + 0.424 + ], + "angle": 0, + "content": "- \\(\\gamma\\): Type-specific relationship weights" + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.363, + 0.531, + 0.424 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.428, + 0.827, + 0.485 + ], + "angle": 0, + "content": "These parameters can be tuned based on application requirements, video characteristics, and computational constraints. The algorithm's modular design allows for straightforward substitution of specific component implementations (e.g., different object detectors or relationship checkers) without altering the overall framework." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.501, + 0.471, + 0.516 + ], + "angle": 0, + "content": "E.3 Computational Complexity Analysis" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.527, + 0.825, + 0.598 + ], + "angle": 0, + "content": "The time complexity scales with \\( O(\\sqrt{N}) \\) where \\( N \\) is the total number of frames, significantly improving upon linear approaches. Space complexity remains \\( O(N) \\) to maintain the probability distribution and score registry. The algorithm intelligently balances exploration and exploitation through its adaptive sampling approach, making it particularly suitable for large-scale video analysis tasks where exhaustive processing would be prohibitive." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.614, + 0.451, + 0.629 + ], + "angle": 0, + "content": "E.4 Technical Implementation Details" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.64, + 0.825, + 0.683 + ], + "angle": 0, + "content": "Object Detection and Feature Extraction To achieve real-time performance, the object detection module utilizes pre-trained deep convolutional neural network architectures, particularly variants based on Fast R-CNN and the YOLO series. The system employs a two-stage detection strategy:" + }, + { + "type": "text", + "bbox": [ + 0.173, + 0.687, + 0.764, + 0.701 + ], + "angle": 0, + "content": "- Preliminary Detection: Using lightweight models to rapidly identify potential regions;" + }, + { + "type": "text", + "bbox": [ + 0.173, + 0.702, + 0.825, + 0.731 + ], + "angle": 0, + "content": "- Fine-grained Classification: Applying more sophisticated models for detailed classification on high-confidence regions."
+ }, + { + "type": "list", + "bbox": [ + 0.173, + 0.687, + 0.825, + 0.731 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.735, + 0.827, + 0.792 + ], + "angle": 0, + "content": "The feature extraction process leverages self-attention mechanisms from Vision Transformers (ViT), generating rich semantic embeddings robust to various visual variations such as scale, rotation, and illumination. Each identified object is associated with a feature vector \\( f_{i} \\in \\mathbb{R}^{d} \\), where \\( d = 512 \\) represents the dimensionality of the embedding space." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.806, + 0.825, + 0.835 + ], + "angle": 0, + "content": "Mathematical Formulations for Relationship Assessment The evaluation of various relationship types is based on precise mathematical definitions:" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.849, + 0.825, + 0.879 + ], + "angle": 0, + "content": "Spatial Relationships Given bounding boxes \\( B_{i} = (x_{i},y_{i},w_{i},h_{i}) \\) and \\( B_{j} = (x_{j},y_{j},w_{j},h_{j}) \\) for two objects, the confidence for a spatial relationship \\( r_{\\text{spatial}} \\) is calculated as:" + }, + { + "type": "equation", + "bbox": [ + 0.333, + 0.897, + 0.826, + 0.915 + ], + "angle": 0, + "content": "\\[\nC_{\\text{spatial}}(B_{i}, B_{j}, r) = \\phi_{r}(B_{i}, B_{j}) \\cdot \\psi(B_{i}) \\cdot \\psi(B_{j}), \\tag{12}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.827, + 0.121 + ], + "angle": 0, + "content": "where \\(\\phi_r\\) is a relationship-specific compatibility function and \\(\\psi\\) is the object detection confidence. For example, the compatibility for a \"contains\" relationship is defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.386, + 0.139, + 0.826, + 0.174 + ], + "angle": 0, + "content": "\\[\n\\phi_{\\text{contains}}(B_{i}, B_{j}) = \\frac{\\operatorname{IoU}(B_{i}, B_{j})}{\\operatorname{Area}(B_{j})}. \\tag{13}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.188, + 0.825, + 0.218 + ], + "angle": 0, + "content": "Temporal Relationships Temporal relationships are calculated by evaluating object behavior patterns across a sequence of frames \\(\\{F_t, F_{t+1}, \\dots, F_{t+\\Delta_t}\\}\\):" + }, + { + "type": "equation", + "bbox": [ + 0.326, + 0.236, + 0.826, + 0.279 + ], + "angle": 0, + "content": "\\[\nC_{\\text{temporal}}(O_{i}, O_{j}, r, \\Delta_{t}) = \\prod_{k=0}^{\\Delta_{t}-1} T_{r}(O_{i}^{t+k}, O_{j}^{t+k+1}), \\tag{14}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.291, + 0.825, + 0.32 + ], + "angle": 0, + "content": "where \\( T_{r} \\) is a relationship-specific temporal transition matrix and \\( O_{i}^{t} \\) represents the state of object \\( i \\) at time \\( t \\)."
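+ }, + { + "type": "text", + "content": "As a worked illustration of Equations 12-14, the short Python sketch below implements the \"contains\" compatibility (Eq. 13), the confidence-weighted spatial score (Eq. 12), and the product-form temporal score (Eq. 14). The (x, y, w, h) box convention follows the text; the helper names are illustrative assumptions." + }, + { + "type": "code", + "content": "def iou(bi, bj):
    # Boxes are (x, y, w, h) tuples, as in the text; intersection-over-union.
    xi, yi = max(bi[0], bj[0]), max(bi[1], bj[1])
    xa = min(bi[0] + bi[2], bj[0] + bj[2])
    ya = min(bi[1] + bi[3], bj[1] + bj[3])
    inter = max(0.0, xa - xi) * max(0.0, ya - yi)
    union = bi[2] * bi[3] + bj[2] * bj[3] - inter
    return inter / union if union > 0 else 0.0

def phi_contains(bi, bj):
    # Eq. 13: compatibility of 'B_i contains B_j', IoU normalised by Area(B_j).
    return iou(bi, bj) / (bj[2] * bj[3])

def c_spatial(bi, bj, psi_i, psi_j, phi=phi_contains):
    # Eq. 12: relationship compatibility scaled by both detection confidences.
    return phi(bi, bj) * psi_i * psi_j

def c_temporal(transitions):
    # Eq. 14: product of per-step transition scores T_r over a window of
    # length Delta_t; `transitions` holds those scores, each in [0, 1].
    score = 1.0
    for t in transitions:
        score *= t
    return score"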
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.337, + 0.825, + 0.366 + ], + "angle": 0, + "content": "Causal Relationships Causal relationships utilize a Bayesian network framework to compute conditional probabilities:" + }, + { + "type": "equation", + "bbox": [ + 0.348, + 0.384, + 0.826, + 0.419 + ], + "angle": 0, + "content": "\\[\nC_{\\text{causal}}(E_{i}, E_{j}) = P(E_{j} \\mid E_{i}) \\cdot \\log \\frac{P(E_{j} \\mid E_{i})}{P(E_{j})}, \\tag{15}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.43, + 0.719, + 0.446 + ], + "angle": 0, + "content": "where \\( E_{i} \\) and \\( E_{j} \\) represent the presumed cause event and effect event, respectively." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.461, + 0.825, + 0.49 + ], + "angle": 0, + "content": "Attribute Relationships Attribute evaluation employs cosine similarity metrics between feature vectors and attribute prototypes:" + }, + { + "type": "equation", + "bbox": [ + 0.366, + 0.512, + 0.826, + 0.528 + ], + "angle": 0, + "content": "\\[\nC_{\\text{attr}}(O_{i}, a) = \\max(0, \\cos(f_{i}, p_{a}) - \\tau), \\tag{16}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.541, + 0.763, + 0.556 + ], + "angle": 0, + "content": "where \\(p_a\\) is the prototype vector for attribute \\(a\\) and \\(\\tau\\) is the minimum similarity threshold." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.572, + 0.825, + 0.601 + ], + "angle": 0, + "content": "Score Propagation Algorithm Temporal score propagation is implemented through a weighted diffusion process, analogous to heat diffusion on a graph structure:" + }, + { + "type": "equation", + "bbox": [ + 0.381, + 0.619, + 0.826, + 0.653 + ], + "angle": 0, + "content": "\\[\nS^{\\prime}(t) = S(t) + \\sum_{k \\in \\mathcal{N}(t)} w_{k,t} \\cdot S(k), \\tag{17}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.667, + 0.825, + 0.696 + ], + "angle": 0, + "content": "where \\(\\mathcal{N}(t)\\) represents the temporal neighborhood of frame \\(t\\), and \\(w_{k,t}\\) is a weight based on temporal distance, defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.407, + 0.714, + 0.826, + 0.75 + ], + "angle": 0, + "content": "\\[\nw_{k,t} = \\exp\\left(-\\frac{|k - t|^{2}}{2\\sigma^{2}}\\right), \\tag{18}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.761, + 0.418, + 0.776 + ], + "angle": 0, + "content": "where \\(\\sigma\\) controls the diffusion range." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.792, + 0.825, + 0.835 + ], + "angle": 0, + "content": "Adaptive Sampling Optimization The sampling strategy is further improved through a dynamically adjusted Thompson sampling method, modeling the probability distribution \\( P \\) as a Beta distribution with shape parameters updated through previous observations:" + }, + { + "type": "equation", + "bbox": [ + 0.331, + 0.853, + 0.826, + 0.885 + ], + "angle": 0, + "content": "\\[\nP(t) \\sim \\operatorname{Beta}\\left(\\alpha_{t} + \\sum_{i} S_{i}(t), \\beta_{t} + n - \\sum_{i} S_{i}(t)\\right), \\tag{19}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.898, + 0.729, + 0.913 + ], + "angle": 0, + "content": "where \\(\\alpha_{t}\\) and \\(\\beta_{t}\\) are prior hyperparameters and \\(n\\) is the total number of observations."
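The attribute score, the diffusion update, and the posterior sampling step (Eqs. 16-19) admit a compact sketch. The parameter names `radius`, `alpha0`, and `beta0`, and the mapping of propagated scores onto Beta pseudo-counts, are our assumptions for illustration only.

```python
# Illustrative sketch of Eqs. (16)-(19); not the released implementation.
import numpy as np

def c_attr(f_i, p_a, tau=0.5):
    # Eq. (16): thresholded cosine similarity to an attribute prototype p_a.
    cos = float(np.dot(f_i, p_a) / (np.linalg.norm(f_i) * np.linalg.norm(p_a)))
    return max(0.0, cos - tau)

def propagate_scores(scores, radius=5, sigma=2.0):
    # Eqs. (17)-(18): Gaussian-weighted diffusion over a truncated temporal
    # neighborhood N(t) of half-width `radius` (assumed cutoff).
    n, out = len(scores), scores.copy()
    for t in range(n):
        for k in range(max(0, t - radius), min(n, t + radius + 1)):
            if k != t:
                out[t] += np.exp(-((k - t) ** 2) / (2.0 * sigma ** 2)) * scores[k]
    return out

def thompson_pick(scores, n_obs, alpha0=1.0, beta0=1.0, rng=None):
    # Eq. (19): sample per-frame relevance from Beta posteriors whose
    # pseudo-counts come from accumulated scores, then pick the best draw.
    rng = rng or np.random.default_rng()
    s = np.clip(scores, 0.0, 1.0) * n_obs   # stand-in for sum_i S_i(t)
    return int(np.argmax(rng.beta(alpha0 + s, beta0 + n_obs - s)))
```

Drawing from the posterior rather than greedily taking the current maximum is what gives the sampler its exploration-exploitation balance: frames with few observations retain wide posteriors and still get visited occasionally.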
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "20" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.173, + 0.092, + 0.437, + 0.108 + ], + "angle": 0, + "content": "E.5 Practical Application Examples" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.12, + 0.825, + 0.15 + ], + "angle": 0, + "content": "In practical visual search scenarios, the algorithm processes complex queries such as \"a person wearing a blue shirt sits down at a table and then picks up a coffee cup\":" + }, + { + "type": "text", + "bbox": [ + 0.173, + 0.154, + 0.826, + 0.196 + ], + "angle": 0, + "content": "- Query parsing identifies key objects (person, shirt, table, coffee cup) and relationships (blue attribute, sitting action, temporal before-after relation, spatial proximity);" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.197, + 0.611, + 0.21 + ], + "angle": 0, + "content": "- Adaptive sampling selects representative frames from the video;" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.212, + 0.638, + 0.225 + ], + "angle": 0, + "content": "- Multi-relationship evaluation integrates various sources of evidence;" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.227, + 0.759, + 0.24 + ], + "angle": 0, + "content": "- Score propagation establishes a unified confidence landscape across related frame sets;" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.242, + 0.778, + 0.256 + ], + "angle": 0, + "content": "- Result generation provides a concise summary of the most relevant segments in the video." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.154, + 0.826, + 0.256 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.26, + 0.825, + 0.303 + ], + "angle": 0, + "content": "This semantic-logical-temporal search framework represents a significant advancement in multimodal content retrieval, enabling natural language queries that incorporate complex relationships across objects, time, and causal chains." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.326, + 0.499, + 0.342 + ], + "angle": 0, + "content": "E.6 System Specifications for Reproducibility" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.354, + 0.827, + 0.425 + ], + "angle": 0, + "content": "Our experiments were conducted on high-performance servers, each equipped with either an Intel(R) Xeon(R) Platinum 8378A CPU @ 3.00GHz or an Intel(R) Xeon(R) Platinum 8358P CPU @ 2.60GHz, 1TB of RAM, and either 4 or 6 NVIDIA A800 GPUs with 80GB memory. Machines with 4 GPUs are configured with the SXM4 version, while those with 6 GPUs use the PCIe version. For reproducibility, the software environment included Python 3.11, PyTorch 2.4, and NCCL 2.21.5." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.451, + 0.538, + 0.469 + ], + "angle": 0, + "content": "F Case Study of VSLS Keyframe Selection" + }, + { + "type": "image", + "bbox": [ + 0.241, + 0.503, + 0.756, + 0.721 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.73, + 0.827, + 0.801 + ], + "angle": 0, + "content": "Figure 6: Qualitative comparison of frame selection strategies demonstrates VSLS's ability to pinpoint query-critical moments (e.g., the subject presenting pink objects) with temporal precision, while baseline approaches exhibit color misinterpretation (brown) due to suboptimal frame choices. VSLS maintains superior temporal diversity and content relevance, effectively avoiding the redundant selections observed in comparative methods."
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.822, + 0.827, + 0.892 + ], + "angle": 0, + "content": "As shown in Figure 6, the VSLS framework demonstrates its effectiveness through a video question-answering case study involving temporal handwriting analysis. The experiment focuses on distinguishing between two sequential events: a brown pen writing \"guitar\" at 2 seconds and a pink pen rewriting the same word at 3 seconds, with the query requiring identification of the second occurrence's pen color." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.898, + 0.625, + 0.913 + ], + "angle": 0, + "content": "VSLS's analytical process unfolds through three interpretable phases:" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "21" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.173, + 0.092, + 0.825, + 0.135 + ], + "angle": 0, + "content": "- Semantic Logic Extraction: Identifies core visual entities (handwritten text, pen, paper) and constructs temporal relationships through triplet formulation: (text, time, pen), establishing the framework for tracking writing instrument changes;" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.134, + 0.826, + 0.177 + ], + "angle": 0, + "content": "- Temporal Relevance Scoring: The gray relevance curve reveals precise temporal localization, with peak scores aligning perfectly with ground truth positions at 2s and 3s, contrasting sharply with baseline methods' random fluctuations;" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.177, + 0.825, + 0.22 + ], + "angle": 0, + "content": "- Search Pattern Visualization: Demonstrates VSLS's focused inspection near critical moments versus uniform sampling's scattered temporal coverage, explaining the baseline's failure to detect the pink pen." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.092, + 0.826, + 0.22 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.224, + 0.678, + 0.239 + ], + "angle": 0, + "content": "This case study yields two critical insights about VSLS's temporal reasoning:" + }, + { + "type": "text", + "bbox": [ + 0.173, + 0.243, + 0.825, + 0.271 + ], + "angle": 0, + "content": "- Sequential Event Disambiguation: The system successfully differentiates between near-identical visual events through:" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.272, + 0.596, + 0.286 + ], + "angle": 0, + "content": "- First writing instance: Brown pen detection (false positive);" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.287, + 0.594, + 0.301 + ], + "angle": 0, + "content": "- Second writing instance: Pink pen detection (true positive)." + }, + { + "type": "text", + "bbox": [ + 0.173, + 0.301, + 0.825, + 0.33 + ], + "angle": 0, + "content": "- Explanation of answer generation disparity: VSLS produces the correct answer (\"Pink\") versus uniform sampling's erroneous answer (\"Brown\"), which stems from its temporal reasoning failures." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.272, + 0.825, + 0.33 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.334, + 0.826, + 0.39 + ], + "angle": 0, + "content": "The spatial-temporal alignment between relevance peaks and ground truth positions confirms VSLS's unique capacity to synchronize semantic logic with visual evidence flow. This case particularly highlights the method's superiority in scenarios requiring precise discrimination of recurrent events with subtle visual variations."
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.41, + 0.362, + 0.427 + ], + "angle": 0, + "content": "G Iteration Analysis" + }, + { + "type": "image", + "bbox": [ + 0.345, + 0.448, + 0.658, + 0.569 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.576, + 0.827, + 0.618 + ], + "angle": 0, + "content": "Figure 7: The comparative visualization of iteration counts on the medium-length video subset of the VIDEO-MME dataset demonstrates that our method consistently requires a higher number of iterations compared to the T* approach." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.633, + 0.826, + 0.677 + ], + "angle": 0, + "content": "As shown in Figure 7, incorporating relations into the search algorithm increases the average number of iterations on medium-length videos in the Video-MME dataset from 15.9 to 23.8; the overall distribution of iteration counts does not change significantly." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.51, + 0.948 + ], + "angle": 0, + "content": "22" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.173, + 0.09, + 0.279, + 0.108 + ], + "angle": 0, + "content": "H Prompt" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.118, + 0.496, + 0.135 + ], + "angle": 0, + "content": "H.1 Prompt Template for Query Grounding" + }, + { + "type": "text", + "bbox": [ + 0.173, + 0.14, + 0.496, + 0.157 + ], + "angle": 0, + "content": "Here is the prompt we used for query grounding." + }, + { + "type": "title", + "bbox": [ + 0.199, + 0.165, + 0.459, + 0.182 + ], + "angle": 0, + "content": "Prompt Template for Query Grounding" + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.193, + 0.551, + 0.208 + ], + "angle": 0, + "content": "Analyze the following video frames and the question:" + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.208, + 0.357, + 0.221 + ], + "angle": 0, + "content": "Question: " + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.222, + 0.34, + 0.234 + ], + "angle": 0, + "content": "Options: <options>" + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.235, + 0.42, + 0.248 + ], + "angle": 0, + "content": "Step 1: Key Object Identification" + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.248, + 0.584, + 0.262 + ], + "angle": 0, + "content": "- Extract 3-5 core objects detectable by computer vision" + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.263, + 0.621, + 0.277 + ], + "angle": 0, + "content": "- Use YOLO-compatible noun phrases (e.g., \"person\", \"mic\")" + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.277, + 0.493, + 0.291 + ], + "angle": 0, + "content": "- Format: Key Objects: obj1, obj2, obj3" + }, + { + "type": "list", + "bbox": [ + 0.216, + 0.248, + 0.621, + 0.291 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.291, + 0.364, + 0.303 + ], + "angle": 0, + "content": "Step 2: Contextual Cues" + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.304, + 0.737, + 0.318 + ], + "angle": 0, + "content": "- List 2-4 scene elements that help locate key objects based on options provided" + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.318, + 0.53, + 0.332 + ], + "angle": 0, + "content": "- Use detectable items (avoid abstract concepts)" + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.332, + 0.493, + 0.345 + ], + "angle": 0, + "content": "- Format: Cue Objects: cue1, cue2, cue3" + }, + { + "type": "list", + "bbox": [ + 0.216, + 0.304, + 0.737, + 0.345 + ], + "angle": 0, + "content": null + }, + { + "type":
"text", + "bbox": [ + 0.199, + 0.345, + 0.392, + 0.359 + ], + "angle": 0, + "content": "Step 3: Relationship Triplets" + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.36, + 0.354, + 0.373 + ], + "angle": 0, + "content": "- Relationship types:" + }, + { + "type": "text", + "bbox": [ + 0.233, + 0.374, + 0.553, + 0.387 + ], + "angle": 0, + "content": "- Spatial: Objects must appear in the same frame" + }, + { + "type": "text", + "bbox": [ + 0.233, + 0.387, + 0.706, + 0.401 + ], + "angle": 0, + "content": "- Attribute: Color/size/material descriptions (e.g., \"red clothes\", \"large\")" + }, + { + "type": "text", + "bbox": [ + 0.233, + 0.401, + 0.601, + 0.414 + ], + "angle": 0, + "content": "- Time: Appear in different frames within a few seconds" + }, + { + "type": "text", + "bbox": [ + 0.233, + 0.414, + 0.596, + 0.428 + ], + "angle": 0, + "content": "- Causal: There is a temporal order between the objects" + }, + { + "type": "list", + "bbox": [ + 0.233, + 0.374, + 0.706, + 0.428 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.428, + 0.798, + 0.455 + ], + "angle": 0, + "content": "- Format of Relations: (object, relation_type, object), relation_type should be exactly one of spatial/attribute/time/causal" + }, + { + "type": "title", + "bbox": [ + 0.199, + 0.456, + 0.297, + 0.469 + ], + "angle": 0, + "content": "Output Rules" + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.469, + 0.724, + 0.483 + ], + "angle": 0, + "content": "1. One line each for Key Objects/Cue Objects/Rel starting with exact prefixes" + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.483, + 0.798, + 0.497 + ], + "angle": 0, + "content": "2. Separate items with comma except for triplets where items are separated by semicolon" + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.497, + 0.59, + 0.511 + ], + "angle": 0, + "content": "3. Never use markdown or natural language explanations" + }, + { + "type": "text", + "bbox": [ + 0.2, + 0.511, + 0.798, + 0.538 + ], + "angle": 0, + "content": "4. 
If you cannot identify any key objects or cue objects from the video provided, please just identify the possible key or cue objects from the question and options provided" + }, + { + "type": "list", + "bbox": [ + 0.2, + 0.469, + 0.798, + 0.538 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.199, + 0.539, + 0.466, + 0.552 + ], + "angle": 0, + "content": "Below is an example of the procedure:" + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.552, + 0.704, + 0.566 + ], + "angle": 0, + "content": "Question: For \"When does the person in red clothes appear with the dog?\"" + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.566, + 0.285, + 0.579 + ], + "angle": 0, + "content": "Response:" + }, + { + "type": "text", + "bbox": [ + 0.233, + 0.58, + 0.53, + 0.594 + ], + "angle": 0, + "content": "Key Objects: person, dog, red clothes" + }, + { + "type": "text", + "bbox": [ + 0.233, + 0.594, + 0.537, + 0.607 + ], + "angle": 0, + "content": "Cue Objects: grassy_area, leash, fence" + }, + { + "type": "text", + "bbox": [ + 0.233, + 0.608, + 0.734, + 0.621 + ], + "angle": 0, + "content": "Rel: (person; attribute; red clothes), (person; spatial; dog)" + }, + { + "type": "list", + "bbox": [ + 0.233, + 0.58, + 0.734, + 0.621 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.199, + 0.621, + 0.593, + 0.634 + ], + "angle": 0, + "content": "Format your response EXACTLY like this in three lines:" + }, + { + "type": "text", + "bbox": [ + 0.233, + 0.635, + 0.53, + 0.649 + ], + "angle": 0, + "content": "Key Objects: object1, object2, object3" + }, + { + "type": "text", + "bbox": [ + 0.233, + 0.649, + 0.529, + 0.662 + ], + "angle": 0, + "content": "Cue Objects: object1, object2, object3" + }, + { + "type": "text", + "bbox": [ + 0.2, + 0.663, + 0.799, + 0.69 + ], + "angle": 0, + "content": "Rel: (object1; relation_type1; object2), (object3; relation_type2; object4)" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.718, + 0.512, + 0.735 + ], + "angle": 0, + "content": "H.2 Prompt Template for Question Answering" + }, + { + "type": "text", + "bbox": [ + 0.173, + 0.74, + 0.513, + 0.755 + ], + "angle": 0, + "content": "Here is the prompt we used for question answering." + }, + { + "type": "title", + "bbox": [ + 0.199, + 0.765, + 0.477, + 0.781 + ], + "angle": 0, + "content": "Prompt Template for Question Answering" + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.792, + 0.75, + 0.807 + ], + "angle": 0, + "content": "Select the best answer to the following multiple-choice question based on the video." + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.808, + 0.261, + 0.82 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.822, + 0.261, + 0.835 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.838, + 0.221, + 0.844 + ], + "angle": 0, + "content": "." + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.848, + 0.356, + 0.861 + ], + "angle": 0, + "content": "Question: " + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.862, + 0.34, + 0.876 + ], + "angle": 0, + "content": "Options: <options>" + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.876, + 0.612, + 0.889 + ], + "angle": 0, + "content": "Answer with the option's letter from the given choices directly." + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.889, + 0.684, + 0.903 + ], + "angle": 0, + "content": "Your response format should be strictly an upper case letter A, B, C, D, or E."
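Since the H.1 template pins down a strict three-line output format (Key Objects / Cue Objects / Rel, with comma-separated items and semicolon-separated triplet fields), a small parser sketch may help clarify the rules. The regex and validation below are our assumptions about how such a response could be consumed, not the paper's released parser.

```python
# Hedged sketch of parsing the three-line grounding response defined above.
import re

VALID_RELATIONS = {"spatial", "attribute", "time", "causal"}

def parse_grounding_response(text):
    key_objects, cue_objects, relations = [], [], []
    for line in text.strip().splitlines():
        line = line.strip()
        if line.startswith("Key Objects:"):
            key_objects = [o.strip() for o in line[len("Key Objects:"):].split(",") if o.strip()]
        elif line.startswith("Cue Objects:"):
            cue_objects = [o.strip() for o in line[len("Cue Objects:"):].split(",") if o.strip()]
        elif line.startswith("Rel:"):
            # Triplets look like "(person; attribute; red clothes)" and are
            # comma-separated; fields inside use semicolons (Output Rule 2).
            for m in re.finditer(r"\(([^)]*)\)", line):
                parts = [p.strip() for p in m.group(1).split(";")]
                if len(parts) == 3 and parts[1] in VALID_RELATIONS:
                    relations.append(tuple(parts))
    return key_objects, cue_objects, relations

# Example, using the template's own sample response:
resp = """Key Objects: person, dog, red clothes
Cue Objects: grassy_area, leash, fence
Rel: (person; attribute; red clothes), (person; spatial; dog)"""
print(parse_grounding_response(resp))
```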
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "23" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.172, + 0.09, + 0.303, + 0.106 + ], + "angle": 0, + "content": "I Limitations" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.121, + 0.828, + 0.22 + ], + "angle": 0, + "content": "Despite the promising results of our VSLS framework, we acknowledge several limitations: First, although our approach reduces the required frame sampling to just \\(1.4\\%\\), the computational complexity remains a consideration for extremely long videos, with a search overhead of approximately 7.8 seconds. This may present challenges for real-time or low-latency applications. Second, the performance of VSLS is bounded by the capabilities of the underlying object detector (YOLO-WORLD). Detection accuracy may degrade under challenging visual conditions such as poor lighting, occlusion, or unusual camera angles, potentially affecting temporal coverage." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.238, + 0.35, + 0.256 + ], + "angle": 0, + "content": "J Broader Impacts" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.269, + 0.825, + 0.298 + ], + "angle": 0, + "content": "Our Visual Semantic-Logical Search (VSLS) framework primarily offers positive societal impacts as a foundational algorithm for efficient keyframe selection in long videos." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.313, + 0.329, + 0.329 + ], + "angle": 0, + "content": "J.1 Positive Impacts" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.334, + 0.826, + 0.361 + ], + "angle": 0, + "content": "- Educational Applications: VSLS enables students and educators to quickly locate relevant segments in instructional videos, improving learning efficiency for visual content." + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.363, + 0.825, + 0.39 + ], + "angle": 0, + "content": "- Research Enhancement: Scientists across disciplines can benefit from more efficient analysis of video archives, particularly those studying behavioral patterns or analyzing historical footage." + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.392, + 0.825, + 0.433 + ], + "angle": 0, + "content": "- Computational Efficiency: By sampling only \\(1.4\\%\\) of frames on average, our approach reduces computational requirements and energy consumption, contributing to more sustainable AI applications." + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.435, + 0.825, + 0.474 + ], + "angle": 0, + "content": "- Accessibility: Our framework can be integrated into assistive technologies for individuals with cognitive processing challenges, helping them identify and focus on critical moments in video content." + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.334, + 0.826, + 0.474 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.493, + 0.385, + 0.508 + ], + "angle": 0, + "content": "J.2 Potential Considerations" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.518, + 0.825, + 0.547 + ], + "angle": 0, + "content": "As a foundational algorithm, VSLS has limited direct negative impacts. 
However, like any computer vision technology, applications built upon it should be mindful of general considerations:" + }, + { + "type": "text", + "bbox": [ + 0.173, + 0.551, + 0.827, + 0.595 + ], + "angle": 0, + "content": "- Underlying Model Biases: The performance of VSLS depends partly on object detection systems (e.g., YOLO-World), so it inherits any limitations or biases present in these components. Our modular design allows for substitution with improved detection systems as they become available." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.935, + 0.509, + 0.948 + ], + "angle": 0, + "content": "24" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.173, + 0.09, + 0.389, + 0.108 + ], + "angle": 0, + "content": "NeurIPS Paper Checklist" + }, + { + "type": "title", + "bbox": [ + 0.212, + 0.117, + 0.286, + 0.131 + ], + "angle": 0, + "content": "1. Claims" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.136, + 0.825, + 0.165 + ], + "angle": 0, + "content": "Question: Do the main claims made in the abstract and introduction accurately reflect the paper's contributions and scope?" + }, + { + "type": "text", + "bbox": [ + 0.231, + 0.17, + 0.331, + 0.184 + ], + "angle": 0, + "content": "Answer: [Yes]" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.188, + 0.827, + 0.3 + ], + "angle": 0, + "content": "Justification: The abstract and introduction clearly state the main contributions of our work, including (1) the proposal of a semantics-driven keyframe search framework using four logical relations, (2) performance gains on multiple long video QA benchmarks, (3) efficient frame sampling \\((1.4\\%)\\) with state-of-the-art results, and (4) plug-and-play compatibility with VLM/LLM pipelines. These claims are supported by both the method and experimental sections (see Sections \"Introduction\", \"Method\", and \"Experiment\"), and limitations are discussed in the main paper and Appendix I. The claims are fully aligned with the presented theoretical and empirical results." + }, + { + "type": "title", + "bbox": [ + 0.231, + 0.304, + 0.31, + 0.317 + ], + "angle": 0, + "content": "Guidelines:" + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.32, + 0.825, + 0.348 + ], + "angle": 0, + "content": "- The answer NA means that the abstract and introduction do not include the claims made in the paper." + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.348, + 0.825, + 0.39 + ], + "angle": 0, + "content": "- The abstract and/or introduction should clearly state the claims made, including the contributions made in the paper and important assumptions and limitations. A No or NA answer to this question will not be perceived well by the reviewers." + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.391, + 0.825, + 0.419 + ], + "angle": 0, + "content": "- The claims made should match theoretical and experimental results, and reflect how much the results can be expected to generalize to other settings." + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.42, + 0.825, + 0.448 + ], + "angle": 0, + "content": "- It is fine to include aspirational goals as motivation as long as it is clear that these goals are not attained by the paper." + }, + { + "type": "list", + "bbox": [ + 0.232, + 0.32, + 0.825, + 0.448 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.211, + 0.453, + 0.318, + 0.466 + ], + "angle": 0, + "content": "2. 
Limitations" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.472, + 0.805, + 0.487 + ], + "angle": 0, + "content": "Question: Does the paper discuss the limitations of the work performed by the authors?" + }, + { + "type": "text", + "bbox": [ + 0.231, + 0.491, + 0.331, + 0.505 + ], + "angle": 0, + "content": "Answer: [Yes]" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.51, + 0.625, + 0.525 + ], + "angle": 0, + "content": "Justification: The paper discusses limitations in Appendix I." + }, + { + "type": "title", + "bbox": [ + 0.231, + 0.53, + 0.31, + 0.543 + ], + "angle": 0, + "content": "Guidelines:" + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.545, + 0.825, + 0.573 + ], + "angle": 0, + "content": "- The answer NA means that the paper has no limitation while the answer No means that the paper has limitations, but those are not discussed in the paper." + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.574, + 0.795, + 0.588 + ], + "angle": 0, + "content": "- The authors are encouraged to create a separate \"Limitations\" section in their paper." + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.589, + 0.825, + 0.657 + ], + "angle": 0, + "content": "- The paper should point out any strong assumptions and how robust the results are to violations of these assumptions (e.g., independence assumptions, noiseless settings, model well-specification, asymptotic approximations only holding locally). The authors should reflect on how these assumptions might be violated in practice and what the implications would be." + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.659, + 0.825, + 0.699 + ], + "angle": 0, + "content": "- The authors should reflect on the scope of the claims made, e.g., if the approach was only tested on a few datasets or with a few runs. In general, empirical results often depend on implicit assumptions, which should be articulated." + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.701, + 0.825, + 0.771 + ], + "angle": 0, + "content": "- The authors should reflect on the factors that influence the performance of the approach. For example, a facial recognition algorithm may perform poorly when image resolution is low or images are taken in low lighting. Or a speech-to-text system might not be used reliably to provide closed captions for online lectures because it fails to handle technical jargon." + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.772, + 0.825, + 0.799 + ], + "angle": 0, + "content": "- The authors should discuss the computational efficiency of the proposed algorithms and how they scale with dataset size." + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.8, + 0.825, + 0.828 + ], + "angle": 0, + "content": "- If applicable, the authors should discuss possible limitations of their approach to address problems of privacy and fairness." + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.829, + 0.825, + 0.913 + ], + "angle": 0, + "content": "- While the authors might fear that complete honesty about limitations might be used by reviewers as grounds for rejection, a worse outcome might be that reviewers discover limitations that aren't acknowledged in the paper. The authors should use their best judgment and recognize that individual actions in favor of transparency play an important role in developing norms that preserve the integrity of the community. Reviewers will be specifically instructed to not penalize honesty concerning limitations." 
+ }, + { + "type": "list", + "bbox": [ + 0.232, + 0.545, + 0.825, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "25" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.21, + 0.092, + 0.457, + 0.107 + ], + "angle": 0, + "content": "3. Theory assumptions and proofs" + }, + { + "type": "text", + "bbox": [ + 0.229, + 0.112, + 0.825, + 0.141 + ], + "angle": 0, + "content": "Question: For each theoretical result, does the paper provide the full set of assumptions and a complete (and correct) proof?" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.146, + 0.331, + 0.16 + ], + "angle": 0, + "content": "Answer: [NA]" + }, + { + "type": "text", + "bbox": [ + 0.229, + 0.166, + 0.827, + 0.223 + ], + "angle": 0, + "content": "Justification: The paper does not include formal theoretical results, theorems, or proofs. Our work is primarily methodological and experimental; all mathematical formulations are used to describe the algorithm and its components, but no formal theorems are claimed or proved. Therefore, this item is not applicable." + }, + { + "type": "title", + "bbox": [ + 0.23, + 0.228, + 0.31, + 0.241 + ], + "angle": 0, + "content": "Guidelines:" + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.244, + 0.725, + 0.257 + ], + "angle": 0, + "content": "- The answer NA means that the paper does not include theoretical results." + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.259, + 0.826, + 0.284 + ], + "angle": 0, + "content": "- All the theorems, formulas, and proofs in the paper should be numbered and cross-referenced." + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.287, + 0.822, + 0.301 + ], + "angle": 0, + "content": "- All assumptions should be clearly stated or referenced in the statement of any theorems." + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.302, + 0.825, + 0.344 + ], + "angle": 0, + "content": "- The proofs can either appear in the main paper or the supplemental material, but if they appear in the supplemental material, the authors are encouraged to provide a short proof sketch to provide intuition." + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.345, + 0.823, + 0.373 + ], + "angle": 0, + "content": "- Inversely, any informal proof provided in the core of the paper should be complemented by formal proofs provided in appendix or supplemental material." + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.374, + 0.773, + 0.388 + ], + "angle": 0, + "content": "- Theorems and Lemmas that the proof relies upon should be properly referenced." + }, + { + "type": "list", + "bbox": [ + 0.232, + 0.244, + 0.826, + 0.388 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.21, + 0.394, + 0.484, + 0.409 + ], + "angle": 0, + "content": "4. Experimental result reproducibility" + }, + { + "type": "text", + "bbox": [ + 0.228, + 0.414, + 0.827, + 0.456 + ], + "angle": 0, + "content": "Question: Does the paper fully disclose all the information needed to reproduce the main experimental results of the paper to the extent that it affects the main claims and/or conclusions of the paper (regardless of whether the code and data are provided or not)?" 
+ }, + { + "type": "text", + "bbox": [ + 0.23, + 0.462, + 0.331, + 0.476 + ], + "angle": 0, + "content": "Answer: [Yes]" + }, + { + "type": "text", + "bbox": [ + 0.228, + 0.481, + 0.827, + 0.58 + ], + "angle": 0, + "content": "Justification: The paper provides comprehensive details required for reproducibility, including descriptions of all datasets used (see Section \"Details of Datasets\" and Appendix D), implementation details of the proposed algorithm (see \"Method\" and \"Algorithm Overview\"), hyperparameter choices, prompt templates (Appendix \"Prompt\"), and evaluation protocols for each experiment. We also specify the object detection models and baselines used, and state that the code will be publicly released. This level of detail allows other researchers to replicate the main experiments and validate our claims." + }, + { + "type": "title", + "bbox": [ + 0.23, + 0.585, + 0.311, + 0.598 + ], + "angle": 0, + "content": "Guidelines:" + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.601, + 0.688, + 0.614 + ], + "angle": 0, + "content": "- The answer NA means that the paper does not include experiments." + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.616, + 0.825, + 0.657 + ], + "angle": 0, + "content": "- If the paper includes experiments, a No answer to this question will not be perceived well by the reviewers: Making the paper reproducible is important, regardless of whether the code and data are provided or not." + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.658, + 0.825, + 0.686 + ], + "angle": 0, + "content": "- If the contribution is a dataset and/or model, the authors should describe the steps taken to make their results reproducible or verifiable." + }, + { + "type": "list", + "bbox": [ + 0.232, + 0.601, + 0.825, + 0.686 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.687, + 0.826, + 0.811 + ], + "angle": 0, + "content": "- Depending on the contribution, reproducibility can be accomplished in various ways. For example, if the contribution is a novel architecture, describing the architecture fully might suffice, or if the contribution is a specific model and empirical evaluation, it may be necessary to either make it possible for others to replicate the model with the same dataset, or provide access to the model. In general, releasing code and data is often one good way to accomplish this, but reproducibility can also be provided via detailed instructions for how to replicate the results, access to a hosted model (e.g., in the case of a large language model), releasing of a model checkpoint, or other means that are appropriate to the research performed." + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.812, + 0.826, + 0.853 + ], + "angle": 0, + "content": "- While NeurIPS does not require releasing code, the conference does require all submissions to provide some reasonable avenue for reproducibility, which may depend on the nature of the contribution. For example" + }, + { + "type": "text", + "bbox": [ + 0.242, + 0.855, + 0.824, + 0.883 + ], + "angle": 0, + "content": "(a) If the contribution is primarily a new algorithm, the paper should make it clear how to reproduce that algorithm." + }, + { + "type": "text", + "bbox": [ + 0.242, + 0.884, + 0.824, + 0.912 + ], + "angle": 0, + "content": "(b) If the contribution is primarily a new model architecture, the paper should describe the architecture clearly and fully." 
+ }, + { + "type": "list", + "bbox": [ + 0.242, + 0.855, + 0.824, + 0.912 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "26" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.244, + 0.092, + 0.825, + 0.147 + ], + "angle": 0, + "content": "(c) If the contribution is a new model (e.g., a large language model), then there should either be a way to access this model for reproducing the results or a way to reproduce the model (e.g., with an open-source dataset or instructions for how to construct the dataset)." + }, + { + "type": "text", + "bbox": [ + 0.244, + 0.148, + 0.825, + 0.219 + ], + "angle": 0, + "content": "(d) We recognize that reproducibility may be tricky in some cases, in which case authors are welcome to describe the particular way they provide for reproducibility. In the case of closed-source models, it may be that access to the model is limited in some way (e.g., to registered users), but it should be possible for other researchers to have some path to reproducing or verifying the results." + }, + { + "type": "list", + "bbox": [ + 0.244, + 0.092, + 0.825, + 0.219 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.21, + 0.222, + 0.441, + 0.236 + ], + "angle": 0, + "content": "5. Open access to data and code" + }, + { + "type": "text", + "bbox": [ + 0.229, + 0.24, + 0.827, + 0.282 + ], + "angle": 0, + "content": "Question: Does the paper provide open access to the data and code, with sufficient instructions to faithfully reproduce the main experimental results, as described in supplemental material?" + }, + { + "type": "title", + "bbox": [ + 0.23, + 0.287, + 0.331, + 0.301 + ], + "angle": 0, + "content": "Answer: [Yes]" + }, + { + "type": "text", + "bbox": [ + 0.229, + 0.305, + 0.827, + 0.389 + ], + "angle": 0, + "content": "Justification: We state in the abstract and main text that the code will be publicly released. All datasets used in our experiments are from public benchmarks (LONGVIDEOBENCH,VIDEO-MME, HAYSTACK-LVBENCH, EGO4D), and details for data access are provided in Appendix D. Instructions for running our framework, data preparation, and experiment replication will be included in the released code repository. Thus, researchers will be able to access both code and data with clear instructions for full reproducibility." + }, + { + "type": "title", + "bbox": [ + 0.23, + 0.393, + 0.311, + 0.406 + ], + "angle": 0, + "content": "Guidelines:" + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.408, + 0.764, + 0.422 + ], + "angle": 0, + "content": "- The answer NA means that paper does not include experiments requiring code." + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.423, + 0.825, + 0.451 + ], + "angle": 0, + "content": "- Please see the NeurIPS code and data submission guidelines (https://nips.cc/public/guides/CodeSubmissionPolicy) for more details." + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.452, + 0.825, + 0.507 + ], + "angle": 0, + "content": "- While we encourage the release of code and data, we understand that this might not be possible, so \"No\" is an acceptable answer. Papers cannot be rejected simply for not including code, unless this is central to the contribution (e.g., for a new open-source benchmark)." + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.509, + 0.825, + 0.55 + ], + "angle": 0, + "content": "- The instructions should contain the exact command and environment needed to run to reproduce the results. 
See the NeurIPS code and data submission guidelines (https://nips.cc/public/guides/CodeSubmissionPolicy) for more details." + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.551, + 0.825, + 0.579 + ], + "angle": 0, + "content": "- The authors should provide instructions on data access and preparation, including how to access the raw data, preprocessed data, intermediate data, and generated data, etc." + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.58, + 0.825, + 0.621 + ], + "angle": 0, + "content": "- The authors should provide scripts to reproduce all experimental results for the new proposed method and baselines. If only a subset of experiments are reproducible, they should state which ones are omitted from the script and why." + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.622, + 0.825, + 0.65 + ], + "angle": 0, + "content": "- At submission time, to preserve anonymity, the authors should release anonymized versions (if applicable)." + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.651, + 0.825, + 0.68 + ], + "angle": 0, + "content": "- Providing as much information as possible in supplemental material (appended to the paper) is recommended, but including URLs to data and code is permitted." + }, + { + "type": "list", + "bbox": [ + 0.232, + 0.408, + 0.825, + 0.68 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.21, + 0.683, + 0.432, + 0.698 + ], + "angle": 0, + "content": "6. Experimental setting/details" + }, + { + "type": "text", + "bbox": [ + 0.229, + 0.702, + 0.827, + 0.743 + ], + "angle": 0, + "content": "Question: Does the paper specify all the training and test details (e.g., data splits, hyperparameters, how they were chosen, type of optimizer, etc.) necessary to understand the results?" + }, + { + "type": "title", + "bbox": [ + 0.23, + 0.748, + 0.331, + 0.763 + ], + "angle": 0, + "content": "Answer: [Yes]" + }, + { + "type": "text", + "bbox": [ + 0.229, + 0.766, + 0.827, + 0.849 + ], + "angle": 0, + "content": "Justification: The paper specifies all relevant experimental details, including descriptions of dataset splits, hyperparameters, evaluation metrics, and prompt templates (see \"Experiment,\" Table captions, and Appendix D). As our method is training-free, we clarify in the main text which components rely on pre-trained models and explicitly describe all parameter settings for reproducibility. This ensures that readers can fully understand and interpret the reported results." + }, + { + "type": "title", + "bbox": [ + 0.23, + 0.854, + 0.311, + 0.867 + ], + "angle": 0, + "content": "Guidelines:" + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.869, + 0.688, + 0.883 + ], + "angle": 0, + "content": "- The answer NA means that the paper does not include experiments." + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.884, + 0.825, + 0.913 + ], + "angle": 0, + "content": "- The experimental setting should be presented in the core of the paper to a level of detail that is necessary to appreciate the results and make sense of them." + }, + { + "type": "list", + "bbox": [ + 0.232, + 0.869, + 0.825, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "27" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.232, + 0.092, + 0.825, + 0.119 + ], + "angle": 0, + "content": "- The full details can be provided either with the code, in appendix, or as supplemental material." 
+ }, + { + "type": "title", + "bbox": [ + 0.211, + 0.126, + 0.473, + 0.141 + ], + "angle": 0, + "content": "7. Experiment statistical significance" + }, + { + "type": "text", + "bbox": [ + 0.229, + 0.146, + 0.825, + 0.175 + ], + "angle": 0, + "content": "Question: Does the paper report error bars suitably and correctly defined or other appropriate information about the statistical significance of the experiments?" + }, + { + "type": "title", + "bbox": [ + 0.23, + 0.181, + 0.327, + 0.195 + ], + "angle": 0, + "content": "Answer: [No]" + }, + { + "type": "text", + "bbox": [ + 0.229, + 0.2, + 0.825, + 0.284 + ], + "angle": 0, + "content": "Justification: The paper does not report error bars or formal statistical significance tests for the main experimental results, as our approach is deterministic and uses fixed dataset splits and pre-trained models. Metrics are reported as single values following common practice in recent long video QA benchmarks. While this is standard in the area, we acknowledge that including error bars or additional significance analysis would further strengthen the experimental evaluation." + }, + { + "type": "title", + "bbox": [ + 0.23, + 0.29, + 0.312, + 0.303 + ], + "angle": 0, + "content": "Guidelines:" + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.306, + 0.69, + 0.32 + ], + "angle": 0, + "content": "- The answer NA means that the paper does not include experiments." + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.321, + 0.825, + 0.363 + ], + "angle": 0, + "content": "- The authors should answer \"Yes\" if the results are accompanied by error bars, confidence intervals, or statistical significance tests, at least for the experiments that support the main claims of the paper." + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.364, + 0.825, + 0.405 + ], + "angle": 0, + "content": "- The factors of variability that the error bars are capturing should be clearly stated (for example, train/test split, initialization, random drawing of some parameter, or overall run with given experimental conditions)." + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.406, + 0.825, + 0.434 + ], + "angle": 0, + "content": "- The method for calculating the error bars should be explained (closed form formula, call to a library function, bootstrap, etc.)" + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.435, + 0.735, + 0.449 + ], + "angle": 0, + "content": "- The assumptions made should be given (e.g., Normally distributed errors)." + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.45, + 0.825, + 0.477 + ], + "angle": 0, + "content": "- It should be clear whether the error bar is the standard deviation or the standard error of the mean." + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.479, + 0.825, + 0.519 + ], + "angle": 0, + "content": "- It is OK to report 1-sigma error bars, but one should state it. The authors should preferably report a 2-sigma error bar than state that they have a \\(96\\%\\) CI, if the hypothesis of Normality of errors is not verified." + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.521, + 0.825, + 0.562 + ], + "angle": 0, + "content": "- For asymmetric distributions, the authors should be careful not to show in tables or figures symmetric error bars that would yield results that are out of range (e.g. negative error rates)." 
+ }, + { + "type": "text", + "bbox": [ + 0.232, + 0.564, + 0.825, + 0.592 + ], + "angle": 0, + "content": "- If error bars are reported in tables or plots, The authors should explain in the text how they were calculated and reference the corresponding figures or tables in the text." + }, + { + "type": "list", + "bbox": [ + 0.232, + 0.306, + 0.825, + 0.592 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.211, + 0.598, + 0.458, + 0.613 + ], + "angle": 0, + "content": "8. Experiments compute resources" + }, + { + "type": "text", + "bbox": [ + 0.229, + 0.618, + 0.826, + 0.66 + ], + "angle": 0, + "content": "Question: For each experiment, does the paper provide sufficient information on the computer resources (type of compute workers, memory, time of execution) needed to reproduce the experiments?" + }, + { + "type": "title", + "bbox": [ + 0.23, + 0.666, + 0.331, + 0.681 + ], + "angle": 0, + "content": "Answer: [Yes]" + }, + { + "type": "text", + "bbox": [ + 0.229, + 0.686, + 0.825, + 0.756 + ], + "angle": 0, + "content": "Justification: The paper specifies the computing environment in Appendix E.6, and reports both latency and FLOPs for major baselines and our method in Table 1. We also provide the number of iterations, average processing time, and model sizes in the main text and tables. This information is sufficient for others to estimate compute requirements and reproduce the experiments." + }, + { + "type": "title", + "bbox": [ + 0.23, + 0.761, + 0.312, + 0.775 + ], + "angle": 0, + "content": "Guidelines:" + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.777, + 0.69, + 0.792 + ], + "angle": 0, + "content": "- The answer NA means that the paper does not include experiments." + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.793, + 0.825, + 0.82 + ], + "angle": 0, + "content": "- The paper should indicate the type of compute workers CPU or GPU, internal cluster, or cloud provider, including relevant memory and storage." + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.821, + 0.825, + 0.849 + ], + "angle": 0, + "content": "- The paper should provide the amount of compute required for each of the individual experimental runs as well as estimate the total compute." + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.85, + 0.825, + 0.892 + ], + "angle": 0, + "content": "- The paper should disclose whether the full research project required more compute than the experiments reported in the paper (e.g., preliminary or failed experiments that didn't make it into the paper)." + }, + { + "type": "list", + "bbox": [ + 0.232, + 0.777, + 0.825, + 0.892 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.211, + 0.898, + 0.334, + 0.911 + ], + "angle": 0, + "content": "9. Code of ethics" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "28" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.23, + 0.092, + 0.825, + 0.121 + ], + "angle": 0, + "content": "Question: Does the research conducted in the paper conform, in every respect, with the NeurIPS Code of Ethics https://neurips.cc/public/EthicsGuidelines?" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.125, + 0.331, + 0.139 + ], + "angle": 0, + "content": "Answer: [Yes]" + }, + { + "type": "text", + "bbox": [ + 0.228, + 0.144, + 0.828, + 0.215 + ], + "angle": 0, + "content": "Justification: The research follows the NeurIPS Code of Ethics. 
All datasets used are publicly available, appropriately licensed, and include human annotation with proper privacy safeguards (see Appendix D). No personally identifiable information or sensitive data is used. The proposed methods and experiments present no foreseeable risk of harm, discrimination, or privacy violation. Anonymity is preserved in all supplementary materials." + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.218, + 0.31, + 0.231 + ], + "angle": 0, + "content": "Guidelines:" + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.233, + 0.812, + 0.246 + ], + "angle": 0, + "content": "- The answer NA means that the authors have not reviewed the NeurIPS Code of Ethics." + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.248, + 0.824, + 0.275 + ], + "angle": 0, + "content": "- If the authors answer No, they should explain the special circumstances that require a deviation from the Code of Ethics." + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.277, + 0.825, + 0.305 + ], + "angle": 0, + "content": "- The authors should make sure to preserve anonymity (e.g., if there is a special consideration due to laws or regulations in their jurisdiction)." + }, + { + "type": "list", + "bbox": [ + 0.232, + 0.233, + 0.825, + 0.305 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.204, + 0.309, + 0.353, + 0.324 + ], + "angle": 0, + "content": "10. Broader impacts" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.329, + 0.825, + 0.358 + ], + "angle": 0, + "content": "Question: Does the paper discuss both potential positive societal impacts and negative societal impacts of the work performed?" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.361, + 0.331, + 0.375 + ], + "angle": 0, + "content": "Answer: [Yes]" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.38, + 0.662, + 0.395 + ], + "angle": 0, + "content": "Justification: Our paper discusses broader impacts in Appendix J." + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.399, + 0.311, + 0.412 + ], + "angle": 0, + "content": "Guidelines:" + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.415, + 0.755, + 0.428 + ], + "angle": 0, + "content": "- The answer NA means that there is no societal impact of the work performed." + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.43, + 0.824, + 0.457 + ], + "angle": 0, + "content": "- If the authors answer NA or No, they should explain why their work has no societal impact or why the paper does not address societal impact." + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.459, + 0.825, + 0.514 + ], + "angle": 0, + "content": "- Examples of negative societal impacts include potential malicious or unintended uses (e.g., disinformation, generating fake profiles, surveillance), fairness considerations (e.g., deployment of technologies that could make decisions that unfairly impact specific groups), privacy considerations, and security considerations." + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.515, + 0.825, + 0.611 + ], + "angle": 0, + "content": "- The conference expects that many papers will be foundational research and not tied to particular applications, let alone deployments. However, if there is a direct path to any negative applications, the authors should point it out. For example, it is legitimate to point out that an improvement in the quality of generative models could be used to generate deepfakes for disinformation. 
On the other hand, it is not needed to point out that a generic algorithm for optimizing neural networks could enable people to train models that generate Deepfakes faster." + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.613, + 0.825, + 0.668 + ], + "angle": 0, + "content": "- The authors should consider possible harms that could arise when the technology is being used as intended and functioning correctly, harms that could arise when the technology is being used as intended but gives incorrect results, and harms following from (intentional or unintentional) misuse of the technology." + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.669, + 0.825, + 0.724 + ], + "angle": 0, + "content": "- If there are negative societal impacts, the authors could also discuss possible mitigation strategies (e.g., gated release of models, providing defenses in addition to attacks, mechanisms for monitoring misuse, mechanisms to monitor how a system learns from feedback over time, improving the efficiency and accessibility of ML)." + }, + { + "type": "list", + "bbox": [ + 0.232, + 0.415, + 0.825, + 0.724 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.204, + 0.729, + 0.315, + 0.743 + ], + "angle": 0, + "content": "11. Safeguards" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.748, + 0.826, + 0.791 + ], + "angle": 0, + "content": "Question: Does the paper describe safeguards that have been put in place for responsible release of data or models that have a high risk for misuse (e.g., pretrained language models, image generators, or scraped datasets)?" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.795, + 0.331, + 0.809 + ], + "angle": 0, + "content": "Answer: [NA]" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.813, + 0.825, + 0.897 + ], + "angle": 0, + "content": "Justification: Our work introduces a semantic-logical search framework for keyframe selection that builds upon existing object detection models and benchmarks. It does not release new datasets scraped from the internet or high-risk generative models. While our method improves video understanding capabilities, it doesn't introduce fundamentally new capabilities that would require specific safeguards beyond those already in place for the underlying technologies (such as YOLO-World) that we utilize." + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.898, + 0.625, + 0.913 + ], + "angle": 0, + "content": "- The answer NA means that the paper poses no such risks." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "29" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.232, + 0.092, + 0.825, + 0.147 + ], + "angle": 0, + "content": "- Released models that have a high risk for misuse or dual-use should be released with necessary safeguards to allow for controlled use of the model, for example by requiring that users adhere to usage guidelines or restrictions to access the model or implementing safety filters." + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.149, + 0.825, + 0.177 + ], + "angle": 0, + "content": "- Datasets that have been scraped from the Internet could pose safety risks. The authors should describe how they avoided releasing unsafe images." 
+ }, + { + "type": "text", + "bbox": [ + 0.232, + 0.178, + 0.825, + 0.219 + ], + "angle": 0, + "content": "- We recognize that providing effective safeguards is challenging, and many papers do not require this, but we encourage authors to take this into account and make a best faith effort." + }, + { + "type": "list", + "bbox": [ + 0.232, + 0.092, + 0.825, + 0.219 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.204, + 0.224, + 0.421, + 0.24 + ], + "angle": 0, + "content": "12. Licenses for existing assets" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.244, + 0.825, + 0.287 + ], + "angle": 0, + "content": "Question: Are the creators or original owners of assets (e.g., code, data, models), used in the paper, properly credited and are the license and terms of use explicitly mentioned and properly respected?" + }, + { + "type": "text", + "bbox": [ + 0.231, + 0.291, + 0.33, + 0.306 + ], + "angle": 0, + "content": "Answer: [NA]" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.31, + 0.825, + 0.395 + ], + "angle": 0, + "content": "Justification: Our work introduces a semantic-logical search framework for keyframe selection that builds upon existing object detection models and benchmarks. It does not release new datasets scraped from the internet or high-risk generative models. While our method improves video understanding capabilities, it doesn't introduce fundamentally new capabilities that would require specific safeguards beyond those already in place for the underlying technologies (such as YOLO-World) that we utilize." + }, + { + "type": "title", + "bbox": [ + 0.231, + 0.399, + 0.31, + 0.412 + ], + "angle": 0, + "content": "Guidelines:" + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.415, + 0.677, + 0.429 + ], + "angle": 0, + "content": "- The answer NA means that the paper does not use existing assets." + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.43, + 0.801, + 0.444 + ], + "angle": 0, + "content": "- The authors should cite the original paper that produced the code package or dataset." + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.445, + 0.825, + 0.471 + ], + "angle": 0, + "content": "- The authors should state which version of the asset is used and, if possible, include a URL." + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.473, + 0.75, + 0.487 + ], + "angle": 0, + "content": "- The name of the license (e.g., CC-BY 4.0) should be included for each asset." + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.489, + 0.825, + 0.516 + ], + "angle": 0, + "content": "- For scraped data from a particular source (e.g., website), the copyright and terms of service of that source should be provided." + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.518, + 0.825, + 0.572 + ], + "angle": 0, + "content": "- If assets are released, the license, copyright information, and terms of use in the package should be provided. For popular datasets, paperswithcode.com/datasets has curated licenses for some datasets. Their licensing guide can help determine the license of a dataset." + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.574, + 0.825, + 0.602 + ], + "angle": 0, + "content": "- For existing datasets that are re-packaged, both the original license and the license of the derived asset (if it has changed) should be provided." 
+ }, + { + "type": "text", + "bbox": [ + 0.232, + 0.603, + 0.825, + 0.63 + ], + "angle": 0, + "content": "- If this information is not available online, the authors are encouraged to reach out to the asset's creators." + }, + { + "type": "list", + "bbox": [ + 0.232, + 0.415, + 0.825, + 0.63 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.204, + 0.636, + 0.311, + 0.649 + ], + "angle": 0, + "content": "13. New assets" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.655, + 0.825, + 0.685 + ], + "angle": 0, + "content": "Question: Are new assets introduced in the paper well documented and is the documentation provided alongside the assets?" + }, + { + "type": "text", + "bbox": [ + 0.231, + 0.689, + 0.33, + 0.704 + ], + "angle": 0, + "content": "Answer: [Yes]" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.709, + 0.827, + 0.807 + ], + "angle": 0, + "content": "Justification: We will release code for our VSLS framework upon publication, as mentioned in the abstract. The code will be accompanied by comprehensive documentation detailing the implementation of our four logical dependencies (spatial, temporal, attribute, and causal), the iterative refinement process, and instructions for reproducing our experimental results. Our paper does not introduce new datasets but rather evaluates our method on existing benchmarks including LONGVIDEOBENCH, VIDEO-MME, and HAYSTACK-LVBENCH, which are properly cited throughout the paper." + }, + { + "type": "title", + "bbox": [ + 0.231, + 0.811, + 0.31, + 0.824 + ], + "angle": 0, + "content": "Guidelines:" + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.827, + 0.676, + 0.84 + ], + "angle": 0, + "content": "- The answer NA means that the paper does not release new assets." + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.841, + 0.827, + 0.882 + ], + "angle": 0, + "content": "- Researchers should communicate the details of the dataset/code/model as part of their submissions via structured templates. This includes details about training, license, limitations, etc." + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.884, + 0.825, + 0.911 + ], + "angle": 0, + "content": "- The paper should discuss whether and how consent was obtained from people whose asset is used." + }, + { + "type": "list", + "bbox": [ + 0.232, + 0.827, + 0.827, + 0.911 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "30" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.232, + 0.092, + 0.825, + 0.121 + ], + "angle": 0, + "content": "- At submission time, remember to anonymize your assets (if applicable). You can either create an anonymized URL or include an anonymized zip file." + }, + { + "type": "title", + "bbox": [ + 0.203, + 0.125, + 0.586, + 0.141 + ], + "angle": 0, + "content": "14. Crowdsourcing and research with human subjects" + }, + { + "type": "text", + "bbox": [ + 0.229, + 0.145, + 0.825, + 0.187 + ], + "angle": 0, + "content": "Question: For crowdsourcing experiments and research with human subjects, does the paper include the full text of instructions given to participants and screenshots, if applicable, as well as details about compensation (if any)?" 
+ }, + { + "type": "text", + "bbox": [ + 0.231, + 0.192, + 0.33, + 0.206 + ], + "angle": 0, + "content": "Answer: [NA]" + }, + { + "type": "text", + "bbox": [ + 0.229, + 0.211, + 0.827, + 0.295 + ], + "angle": 0, + "content": "Justification: Our research does not involve crowdsourcing or human subject experiments. We evaluate our method using existing benchmarks (LONGVIDEOBENCH,VIDEO-MME, LONGVIDEOBENCH) that contain human-annotated ground truth data, but we did not collect new human annotations or conduct human evaluations as part of our work. Our methodology is purely algorithmic, focusing on the semantic-logical frameworks for keyframe selection and evaluation through computational metrics." + }, + { + "type": "title", + "bbox": [ + 0.231, + 0.3, + 0.31, + 0.312 + ], + "angle": 0, + "content": "Guidelines:" + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.315, + 0.825, + 0.342 + ], + "angle": 0, + "content": "- The answer NA means that the paper does not involve crowdsourcing nor research with human subjects." + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.344, + 0.825, + 0.386 + ], + "angle": 0, + "content": "- Including this information in the supplemental material is fine, but if the main contribution of the paper involves human subjects, then as much detail as possible should be included in the main paper." + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.387, + 0.825, + 0.415 + ], + "angle": 0, + "content": "- According to the NeurIPS Code of Ethics, workers involved in data collection, curation, or other labor should be paid at least the minimum wage in the country of the data collector." + }, + { + "type": "list", + "bbox": [ + 0.232, + 0.315, + 0.825, + 0.415 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.203, + 0.419, + 0.825, + 0.448 + ], + "angle": 0, + "content": "15. Institutional review board (IRB) approvals or equivalent for research with human subjects" + }, + { + "type": "text", + "bbox": [ + 0.229, + 0.453, + 0.826, + 0.508 + ], + "angle": 0, + "content": "Question: Does the paper describe potential risks incurred by study participants, whether such risks were disclosed to the subjects, and whether Institutional Review Board (IRB) approvals (or an equivalent approval/review based on the requirements of your country or institution) were obtained?" + }, + { + "type": "text", + "bbox": [ + 0.231, + 0.514, + 0.33, + 0.528 + ], + "angle": 0, + "content": "Answer: [NA]" + }, + { + "type": "text", + "bbox": [ + 0.229, + 0.533, + 0.827, + 0.603 + ], + "angle": 0, + "content": "Justification: Our research does not involve human subjects. We utilize existing benchmark datasets (LONGVIDEOBENCH, VIDEO-MME, HAYSTACK-LVBENCH) without collecting new data from human participants. Our work focuses on developing and evaluating algorithmic approaches for keyframe selection based on semantic-logical relationships, which do not require IRB approval or equivalent ethical review processes." + }, + { + "type": "title", + "bbox": [ + 0.231, + 0.608, + 0.31, + 0.621 + ], + "angle": 0, + "content": "Guidelines:" + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.623, + 0.825, + 0.651 + ], + "angle": 0, + "content": "- The answer NA means that the paper does not involve crowdsourcing nor research with human subjects." 
+ }, + { + "type": "text", + "bbox": [ + 0.232, + 0.652, + 0.825, + 0.694 + ], + "angle": 0, + "content": "- Depending on the country in which research is conducted, IRB approval (or equivalent) may be required for any human subjects research. If you obtained IRB approval, you should clearly state this in the paper." + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.695, + 0.825, + 0.736 + ], + "angle": 0, + "content": "- We recognize that the procedures for this may vary significantly between institutions and locations, and we expect authors to adhere to the NeurIPS Code of Ethics and the guidelines for their institution." + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.737, + 0.825, + 0.765 + ], + "angle": 0, + "content": "- For initial submissions, do not include any information that would break anonymity (if applicable), such as the institution conducting the review." + }, + { + "type": "list", + "bbox": [ + 0.232, + 0.623, + 0.825, + 0.765 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.203, + 0.77, + 0.421, + 0.785 + ], + "angle": 0, + "content": "16. Declaration of LLM usage" + }, + { + "type": "text", + "bbox": [ + 0.229, + 0.79, + 0.827, + 0.846 + ], + "angle": 0, + "content": "Question: Does the paper describe the usage of LLMs if it is an important, original, or non-standard component of the core methods in this research? Note that if the LLM is used only for writing, editing, or formatting purposes and does not impact the core methodology, scientific rigorousness, or originality of the research, declaration is not required." + }, + { + "type": "text", + "bbox": [ + 0.231, + 0.851, + 0.33, + 0.865 + ], + "angle": 0, + "content": "Answer: [Yes]" + }, + { + "type": "text", + "bbox": [ + 0.229, + 0.87, + 0.827, + 0.912 + ], + "angle": 0, + "content": "Justification: Our Visual Semantic-Logical Search framework uses LLMs (specifically mentioned in Section 3.2 and Figure 2) as part of our query decomposition process. We employ models such as LLAVA-7B and GPT-40 to extract semantic information from" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "31" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.235, + 0.09, + 0.829, + 0.159 + ], + "angle": 0, + "content": "textual queries, including key objects, cue objects, and their logical relationships. This LLM-based decomposition is an integral component of our method, as it enables the identification of the four logical relation types (spatial, temporal, attribute, and causal) that guide our keyframe selection process. The prompt template for this query grounding is provided in Appendix H." + }, + { + "type": "title", + "bbox": [ + 0.231, + 0.166, + 0.31, + 0.179 + ], + "angle": 0, + "content": "Guidelines:" + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.181, + 0.825, + 0.209 + ], + "angle": 0, + "content": "- The answer NA means that the core method development in this research does not involve LLMs as any important, original, or non-standard components." + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.21, + 0.825, + 0.237 + ], + "angle": 0, + "content": "- Please refer to our LLM policy (https://neurips.cc/Conferences/2025/LLM) for what should or should not be described." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "32" + } + ] +] \ No newline at end of file diff --git a/data/2025/2503_13xxx/2503.13139/d822d65e-325f-4d59-9422-a1e3da2df18a_origin.pdf b/data/2025/2503_13xxx/2503.13139/d822d65e-325f-4d59-9422-a1e3da2df18a_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..b0a1f0355aa25cfd27f58379417e66fb4b0fe947 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13139/d822d65e-325f-4d59-9422-a1e3da2df18a_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aaacf150751cedb7180ab442f26700ed307d6e31bc43a06e48deefed459a4c4f +size 17732490 diff --git a/data/2025/2503_13xxx/2503.13139/full.md b/data/2025/2503_13xxx/2503.13139/full.md new file mode 100644 index 0000000000000000000000000000000000000000..d37b764ac63a8ae9b58d88ee62286f4b35defbf8 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13139/full.md @@ -0,0 +1,1078 @@ +# Logic-in-Frames: Dynamic Keyframe Search via Visual Semantic-Logical Verification for Long Video Understanding + +Weiyu Guo Ziyang Chen Shaoguang Wang Jianxiang He Yijie Xu AI Thrust, HKUST(GZ) + +{wguo395, zchen483, swang440, jhe307, yxu409}@connect.hkust-gz.edu.cn + +Jinhui Ye + +Shanghai AI Laboratory + +jinhuiyes@gmail.com + +Ying Sun* Hui Xiong* + +AI Thrust, HKUST(GZ) + +{yings, xionghui}@ust.hk + +# Abstract + +Understanding long video content is a complex endeavor that often relies on densely sampled frame captions or end-to-end feature selectors, yet these techniques commonly overlook the logical relationships between textual queries and visual elements. In practice, computational constraints necessitate coarse frame subsampling, a challenge analogous to "finding a needle in a haystack." To address this issue, we introduce a semantics-driven search framework that reformulates keyframe selection under the paradigm of Visual Semantic-Logical Search. Specifically, we systematically define four fundamental logical dependencies: 1) spatial co-occurrence, 2) temporal proximity, 3) attribute dependency, and 4) causal order. These relations dynamically update frame sampling distributions through an iterative refinement process, enabling context-aware identification of semantically critical frames tailored to specific query requirements. Our method establishes new SOTA performance on the manually annotated benchmark in key-frame selection metrics. Furthermore, when applied to downstream video question-answering tasks, the proposed approach demonstrates the best performance gains over existing methods on LONGVIDEOBENCH and VIDEO-MME, validating its effectiveness in bridging the logical gap between textual queries and visual-temporal reasoning. The code will be publicly available. + +# 1 Introduction + +Vision-Language Models (VLMs) Yin et al. (2024) have achieved remarkable progress in video understanding Zou et al. (2024); Tang et al. (2023), particularly in video question answering Wang et al. (2024c); Zhang et al. (2023), demonstrating potential for modeling real-world scenarios. However, existing methods can only simultaneously process a limited number of frames due to the inherent token limit and extremely high dimension of spatio-temporal video data, especially for long videos. Furthermore, uniformly sampled keyframes are query-agnostic and insufficient to represent query-related contents. 
To tackle these challenges, this paper addresses a pivotal research question:

How can we efficiently and accurately select keyframes that are semantically critical for answering video-based queries?

We hypothesize that deconstructing visual semantic and logical cues from textual queries (e.g., target objects, and the temporal, spatial, attribute, and causal relations between visual entities) enables effective identification of task-relevant frames through heuristic sampling and search. Building on this insight, we propose Visual Semantic-Logical Search (VSLS), a keyframe search framework that folds target-object confidence estimation and joint verification of visual semantic logic into an iteratively updated frame sampling distribution, and then selects the most informative, highest-confidence frames. Experimental results show that our approach requires only sparse sampling ($1.4\%$ of frames per video on average) to identify critical frames, significantly reducing computational cost compared to conventional dense sampling strategies while maintaining performance on downstream video understanding tasks.

Figure 1: Examples of four types of visual semantic-logical relationships in video QA detected by our VSLS framework: Temporal (text, time, pen), Attribute (man, attribute, white shirt), Spatial (copilot, spatial, Egyptian Pyramids), and Causal (man, causal, basketball). Green boxes indicate correct answers, while red boxes show baseline errors.

Compared to conventional methods, VSLS offers three distinct advantages. First, the framework is training-free and highly efficient compared with dense captioning Chen et al. (2024c); Kim et al. (2024); Wang et al. (2024b) or video clustering Wang et al. (2024e); Rajan and Parameswaran (2025) strategies, sampling only $1.4\%$ of frames on average on LV-HAYSTACK. Second, it explicitly models logical binary relations (namely spatial, temporal, attribute, and causal) in the query, going beyond simple target detection Ye et al.
(2025b), utilizing additional visual semantic features and enhancing logical consistency throughout the reasoning process. Third, VSLS is a plug-and-play module that can be seamlessly integrated into existing VLM pipelines without cross-component dependencies.

We further examine VSLS on several public datasets, including LONGVIDEOBENCH Ye et al. (2025a), a comprehensive benchmark for long video understanding; VIDEO-MME Fu et al. (2024), a widely adopted multimodal video question answering dataset; and HAYSTACK-LVBENCH Ye et al. (2025a), with meticulously annotated keyframes based on human feedback for more precise analysis. Extensive experiments demonstrate significant improvements in both the semantic similarity and the temporal coverage between the retrieved keyframes and the ground-truth labels, as well as in the accuracy of downstream video question-answering tasks. More importantly, with only $1.4\%$ of video frames (EGO4D Grauman et al. (2022)) sampled during the search iterations, our method achieves an $8.7\%$ improvement in GPT-4o Hurst et al. (2024)'s long video QA accuracy. This performance gain is attributed to a simple yet powerful observation: query-guided visual semantic logic retrieval can close the gap between the visual logic latent in video frames and the logic expressed in the query. Specifically, constructing ternary logic triplets over visual elements (e.g., object1, logic type, object2) enhances downstream reasoning when performing textual-visual retrieval.

To the best of our knowledge, we are arguably the first to search for keyframes in long videos by detecting visual semantic logic, with potential extensions to other textual-visual retrieval tasks. Our main contributions are as follows:

- We define four fundamental types of semantic logic relations in video QA tasks, namely temporal, causal, attribute, and spatial relations, which can be accurately detected across various datasets.
- We sample only $1.4\%$ of frames on average during keyframe search, through heuristic sampling and distribution updates driven by the different visual semantics and logical relations.
- We comprehensively evaluate retrieval efficiency, semantic similarity, temporal coverage, and video question answering accuracy across several widely used video understanding datasets, demonstrating significant improvements in downstream tasks.

![](images/b122957dd055813357d936dff056404604c37dce8079ebfdf60a5643e31f325f.jpg)
Figure 2: Our VSLS framework for efficient keyframe selection. VSLS sparsely samples frames and selects key ones via object detection and logic verification. Steps: 1) use an LLM and VLM to extract cue/target objects and the four logic types (spatial, temporal, attribute, causal); 2) adaptively sample with evolving confidence; 3) detect objects via YOLO-WORLD; 4) fuse scores with a spline function to identify high-confidence frames for downstream tasks.

# 2 Method

Although existing long-context VLM frameworks implement keyframe search for video QA tasks Liang et al. (2024); Park et al. (2024); Tan et al. (2024); Wang et al. (2024a,d); Yu et al. (2024), their computational efficiency and search accuracy remain suboptimal. To address this needle-in-a-haystack challenge Wang et al. (2025); Zhao et al. (2024), we propose a novel method, VSLS, that aligns the semantic relations between the text and video modalities, enhancing the plausibility of logical reasoning and the performance of downstream tasks.
# 2.1 Task Formulation

Given a video sequence $V = \{f_t\}_{t=1}^{N_v}$ with $N_v$ frames and a query $Q$, the ideal temporal search framework aims to retrieve the minimal keyframe subset $V^K = \{f_{m_i}\}_{i=1}^K \subseteq V$ with $K$ keyframes that satisfies:

- Conservation: The keyframe subset $V^K \subseteq V$ must satisfy the answer consistency condition $\mathcal{A}(V^K, Q) = \mathcal{A}(V, Q)$, where $\mathcal{A}(\cdot)$ denotes the video QA function.
- Compactness: $V^K$ must be a minimal subset that preserves conservation; that is, no frame in $V^K$ can be removed without hindering the accuracy or efficiency of video QA.

# 2.2 Visual Semantic Logic Extraction

Starting from a question $Q$ and uniformly sampled frames $\overline{V}_N$ from video $V$, our goal is to extract the key visual elements needed to answer $Q$. We first classify the objects detected in $Q$ and $\overline{V}_N$ into two categories:

- Key Objects: The main participants or references in the scene that the question explicitly or implicitly focuses on (e.g., "person", "microphone").
- Cue Objects: Secondary or contextual entities that help locate or disambiguate the Key Objects (e.g., "book", "tiger painting").

To further leverage semantic and logical links among these objects, we define a set of relations $\mathcal{R} \subseteq \mathcal{O} \times \Delta \times \mathcal{O}$, where each relation $r = (o_i, \delta, o_j) \in \mathcal{R}$, with $o_i, o_j \in \mathcal{O}$ denoting detected objects drawn from the key and cue object sets, and $\delta \in \Delta$ representing one of the following types of relations:
| Relation | Description | Example |
| --- | --- | --- |
| Spatial Co-occurrence | $o_i$ and $o_j$ appear in the same frame, indicating co-occurrence or proximity. | "A person is standing beside a vase." ⇒ (person, spatial, vase) |
| Temporal Proximity | $o_i$ and $o_j$ occur in close frames, linking sequences or transitions. | "After a dog entered the room, a cat entered." ⇒ (dog, temporal, cat) |
| Attribute Dependency | $o_i$ and $o_j$ share visual properties, e.g., color or size. | "A person wears a black shirt." ⇒ (person, attribute, black shirt) |
| Causal Order | $o_i$ and $o_j$ follow a cause-effect or prerequisite order. | "A little girl broke the vase." ⇒ (little girl, causal, pieces) |
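To make this vocabulary concrete, the following illustrative Python sketch shows one way the parsed objects and triplets could be represented; the type names (`Relation`, `Triplet`) are ours, not from the released code.

```python
from dataclasses import dataclass
from enum import Enum

class Relation(Enum):
    SPATIAL = "spatial"      # o_i and o_j co-occur in the same frame
    TEMPORAL = "temporal"    # o_i and o_j appear in nearby frames
    ATTRIBUTE = "attribute"  # o_i and o_j share visual properties
    CAUSAL = "causal"        # o_i must precede o_j

@dataclass(frozen=True)
class Triplet:
    subject: str       # a key or cue object, e.g. "person"
    relation: Relation
    object: str        # a key or cue object, e.g. "vase"

# The examples from the table above:
query_relations = [
    Triplet("person", Relation.SPATIAL, "vase"),
    Triplet("dog", Relation.TEMPORAL, "cat"),
    Triplet("person", Relation.ATTRIBUTE, "black shirt"),
    Triplet("little girl", Relation.CAUSAL, "pieces"),
]
```

Keeping relations as flat (subject, type, object) triplets makes the frame-scoring step in Section 2.3.3 a simple dispatch on the relation type.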
Algorithm 1: Visual Semantic-Logical Search

```
Function SemanticLogicalTemporalSearch(V, Q, K, Δt, τ, α, γ):
    O, R ← ParseQuestion(Q)                     // extract key/cue objects and relations
    P ← Uniform; B ← |V|; S ← ∅; N_v ← |V|      // initialize distribution and state
    while B > 0 and |O| > 0 do
        k ← ⌊√B⌋; G ← Grid(Sample(P, k²))       // adaptive grid sampling
        Ω ← DetectObjects(G)                    // detect objects in sampled frames
        foreach t ∈ G do
            C_t ← CalculateBaseScore(Ω_t)       // base detection confidence
            foreach r_type ∈ R do
                δ ← ProcessRelation(r_type, Ω, Δt, τ, α, γ)  // each relation type has its own check
                C_t ← C_t + δ
            UpdateScores(S, t, C_t)             // update global score registry
        DiffuseScores(S, w)                     // temporal context propagation
        P ← NormalizeDistribution(S); B ← B − k²    // update sampling distribution
        foreach g ∈ TopK(S, K) do
            if Ω[g] ∩ O ≠ ∅ then                // remove identified key objects
                O ← O \ Ω[g]
    return TopK(S, K)                           // output the top-K frames by terminal score
```

The choice of these four relations draws on core concepts in linguistics and logic Cohen (1968); Sowa (2000); Talmy (2000), which identify spatial, temporal, attributive, and causal aspects as fundamental for structuring, perceiving, and communicating information about events and states. For more details on this selection, see Appendix A. As shown in Figure 1, we construct semantic-logical relations that support a broad range of question-answering tasks. Specifically, questions involving temporal queries ("When does X happen?"), causal reasoning ("Why did Y occur?"), attribute dependence ("What is the person wearing sunglasses doing?"), or spatial constraints ("Who is standing next to the red car?") can be answered more reliably by incorporating these structured relations and contextual cues.

# 2.3 Iterative Semantic-Logical Temporal Search

Based on the extracted key and cue objects and their logic relations, our algorithm iteratively searches for keyframes through semantic and logical reasoning, in four main stages: Frame Sampling (Sec. 2.3.1), Object Detection and Scoring (Sec. 2.3.2), Visual Semantic Logic Detection (Sec. 2.3.3), and Distribution Update (Sec. 2.3.4). The pseudocode is shown in Algorithm 1, and Algorithm 2 (Appendix E) provides a more detailed version.

# 2.3.1 Frame Sampling

To accelerate the search process, we avoid exhaustively scanning all $N_v$ video frames and instead employ a distributed sampling strategy. Let $N_v$ denote the total number of frames in the video, and $P$ be a uniformly initialized sampling distribution over all frames. The sampling process is then defined as:

$$
I_s = \mathrm{Sample}(P \odot N_v, N_s), \tag{1}
$$

where $\mathrm{Sample}(\cdot, N_s)$ selects a subset of $N_s$ frames according to the distribution $P \odot N_v$. To further leverage the detection ability of YOLO, we stack the sampled frames into a $k \times k$ grid, which imposes a constraint on the sample size $N_s$. Specifically, we require:

$$
N_s \in \{k^2 \mid k \in \mathbb{Z}\} \quad \text{and} \quad N_s < N_v. \tag{2}
$$

In practice, this ensures that the number of sampled frames can be reshaped into a compact 2D grid for efficient processing, as sketched below.
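A minimal sketch of this constrained sampling step, under stated assumptions (NumPy; `frames` is an array of decoded RGB frames; `P` is a normalized per-frame distribution; the helper name `sample_grid` is ours, not the released implementation):

```python
import numpy as np

def sample_grid(frames: np.ndarray, P: np.ndarray, budget: int):
    """Sample N_s = k^2 frame indices from distribution P and tile them
    into a single k x k mosaic image, per the constraint of Eqs. (1)-(2)."""
    N_v = len(frames)
    # largest k with k^2 <= budget and k^2 < N_v
    k = int(np.floor(np.sqrt(min(budget, N_v - 1))))
    idx = np.random.choice(N_v, size=k * k, replace=False, p=P)
    idx.sort()
    H, W, C = frames.shape[1:]
    grid = (frames[idx]
            .reshape(k, k, H, W, C)     # arrange the k^2 samples row by row
            .transpose(0, 2, 1, 3, 4)   # interleave row and column axes
            .reshape(k * H, k * W, C))  # one large image for the detector
    return idx, grid
```

Tiling $k^2$ sampled frames into one mosaic lets a single open-vocabulary detector call score many frames at once, which is where much of the search's efficiency comes from.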
Although $P$ is initially uniform, it is adapted over multiple rounds of sampling to focus on frames of higher interest in the video.

# 2.3.2 Object Detection and Scoring

In this stage, we construct the detection search space by taking the union of the key objects and cue objects. In each iteration, we detect objects on the $N_s$ sampled frames using a lightweight model such as YOLO-WORLD Cheng et al. (2024a) for high efficiency, and score the frames based on detection confidence. Specifically, let $\Omega_t$ be the set of detected objects in the frame at time $t$, $c_o$ the confidence of each detected object, and $w_o$ the corresponding weight. We define the frame score as:

$$
C_t = \max_{o \in \Omega_t} (c_o \cdot w_o). \tag{3}
$$

If the confidence score of any key object exceeds a predefined threshold, the frame is added to a list, maintaining a record of frames where crucial targets have been identified for subsequent processing.

![](images/f4ce9972802824b1e904f47e93a3d933772e13b6530cb6d80f8fb6b800e4bad8.jpg)
Figure 3: Sample weight evolution under VSLS optimization for keyframe selection. Top: 16 iterations show progressive convergence toward Ground Truth (red). Bottom: 15 iterations demonstrate similar alignment. Yellow highlights indicate precise matches between algorithm outputs (green) and manual annotations.

# 2.3.3 Visual Semantic Logic Detection

Beyond individual object detection and frame-level scoring, we refine each frame's confidence score by modeling higher-order object relations. Let $\mathcal{R}$ be the set of relations, where each $r \in \mathcal{R}$ involves a pair $(o_1, o_2)$ and is labeled with a type $r_{\mathrm{type}}$. Denote by $C_t$ the confidence score at time $t$, with a global scaling factor $\alpha$ and a relation-specific weight $\gamma_{r_{\mathrm{type}}}$ controlling each logic type's impact. The refined confidence $C_t^{(r)}$ after applying relation $r$ is:

$$
C_t^{(r)} = C_t + \alpha \cdot \gamma_{r_{\mathrm{type}}}. \tag{4}
$$

Spatial Relation. A spatial relation enforces that two objects $o_1$ and $o_2$ co-occur in the same frame. Let $\Omega_t$ be the set of detected objects in frame $t$. If both $o_1 \in \Omega_t$ and $o_2 \in \Omega_t$, the frame confidence is updated as:

$$
C_t \leftarrow C_t + \alpha \cdot \gamma_{\text{spatial}}. \tag{5}
$$

Attribute Relation. An attribute relation is satisfied when $o_1$ and $o_2$ share sufficient bounding-box overlap in the same frame. Let the overlap be the ratio of their intersection area to the minimum of their individual bounding-box areas. If the overlap ratio exceeds a predefined threshold $\tau$ ($\tau = 0.5$ in our experimental setting), we increase the frame confidence:

$$
C_t \leftarrow C_t + \alpha \cdot \gamma_{\text{attribute}}. \tag{6}
$$

Time Relation. A time relation checks whether two objects appear in temporally close frames. Suppose $t_i$ and $t_j$ ($t_i \leq t_j$) are sampled such that $|t_j - t_i| < \Delta_t$, where $\Delta_t$ is a threshold (e.g., 5 frames in our experimental setting). If $o_1$ occurs in frame $t_i$ and $o_2$ in frame $t_j$, both frames' confidences are updated:

$$
C_{t_i} \leftarrow C_{t_i} + \alpha \cdot \gamma_{\text{time}}, \quad C_{t_j} \leftarrow C_{t_j} + \alpha \cdot \gamma_{\text{time}}. \tag{7}
$$

Causal Relation.
A causal relation models an ordering constraint, enforcing that $o_1$ must appear at an earlier time than $o_2$. Specifically, if $o_1 \in \Omega_{t_i}$ and $o_2 \in \Omega_{t_j}$ with $t_i < t_j$, we update the confidence of frames $t_i$ and $t_j$ by:

$$
C_{t_i} \leftarrow C_{t_i} + \alpha \cdot \gamma_{\text{causal}}, \quad C_{t_j} \leftarrow C_{t_j} + \alpha \cdot \gamma_{\text{causal}}. \tag{8}
$$

Through this scoring mechanism, frames with detected relations receive higher confidence and are more likely to be retrieved as keyframes for the given query and video. A hyperparameter search over $\alpha \in \{0.3, 0.5, 0.7, 1.0\}$ found that $\alpha = 0.3$ and $\gamma_{r_{\mathrm{type}}} = 0.5$ achieve the best results across different datasets.

# 2.3.4 Distribution Update

After each iteration of frame sampling, we merge the newly obtained frame confidences into the global score distribution $\{S_f\}$ spanning all frames $f = 1, 2, \dots, N_v$. When a frame $f$ is selected for detection, its score is assigned the confidence value $C_f$, and the visitation counter $N_{v,f}$ is reset to 0. To incorporate temporal context, we diffuse this updated score to neighboring frames within a window of size $w$. Denoting each nearby index by $f \pm \delta$ (for $\delta \in [-w, w]$), we apply:

$$
S_{f \pm \delta} \leftarrow \max\left(S_{f \pm \delta}, \frac{S_f}{1 + |\delta|}\right). \tag{9}
$$

In this way, high-confidence frames raise the scores of nearby frames, reflecting temporal continuity. Following these local updates, the sampling distribution $P$ is refined using spline interpolation and then normalized. The iteration proceeds until either the search budget $B$ is exhausted or all key objects have been identified. The evolution of the distribution across iterations is visualized in Figure 3. Finally, the method outputs the top $K$ frames according to their terminal scores.
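A minimal sketch of the relation bonuses (Eqs. (5), (7), (8); the attribute case of Eq. (6) additionally needs bounding-box geometry and is omitted) and the diffusion step of Eq. (9), reusing the `Relation`/`Triplet` sketch from Section 2.2. All names and the window default are illustrative, not the released implementation:

```python
def relation_bonus(C, det, triplet, alpha=0.3, gamma=0.5, delta_t=5):
    """Add alpha * gamma to frames whose detections satisfy the triplet.
    `C` maps frame index -> confidence; `det` maps frame index -> set of labels."""
    frames = sorted(det)
    for i, t_i in enumerate(frames):
        if triplet.relation is Relation.SPATIAL:
            if {triplet.subject, triplet.object} <= det[t_i]:  # co-occurrence, Eq. (5)
                C[t_i] += alpha * gamma
        elif triplet.relation in (Relation.TEMPORAL, Relation.CAUSAL):
            for t_j in frames[i:]:                             # pairs with t_i <= t_j
                if triplet.subject in det[t_i] and triplet.object in det[t_j]:
                    ok_temporal = (triplet.relation is Relation.TEMPORAL
                                   and (t_j - t_i) < delta_t)  # proximity, Eq. (7)
                    ok_causal = (triplet.relation is Relation.CAUSAL
                                 and t_i < t_j)                # strict order, Eq. (8)
                    if ok_temporal or ok_causal:
                        C[t_i] += alpha * gamma
                        C[t_j] += alpha * gamma
    return C

def diffuse_scores(S, frame, w=4):
    """Eq. (9): propagate the score of `frame` to neighbours within window w,
    decayed by 1 / (1 + |delta|), keeping the running maximum. The window
    default is a placeholder; the paper leaves w as a free parameter."""
    for delta in range(-w, w + 1):
        j = frame + delta
        if 0 <= j < len(S):
            S[j] = max(S[j], S[frame] / (1 + abs(delta)))
    return S
```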
# 3 Experiment

# 3.1 Benchmark Datasets

The proposed VSLS is systematically evaluated on four benchmark datasets: a) LONGVIDEOBENCH Ye et al. (2025a), for assessing long-context video-language comprehension; b) VIDEO-MME Fu et al. (2024), the first comprehensive benchmark for multimodal video analytics; c) HAYSTACK-LVBENCH, extended from LONGVIDEOBENCH with human-annotated frame-index answers; and d) HAYSTACK-EGO4D, derived from EGO4D with similar annotations. While LONGVIDEOBENCH and VIDEO-MME measure the enhancement in QA accuracy, HAYSTACK-EGO4D and HAYSTACK-LVBENCH quantitatively evaluate keyframe selection accuracy through recall and precision metrics. Further details on the datasets are provided in Appendix D.

# 3.2 Evaluation Metrics

# 3.2.1 Evaluation Metrics for Search Utility

Our assessment framework emphasizes both effectiveness and efficiency. For search effectiveness, we use three metrics to compare model-predicted keyframes with human annotations, considering both individual frames and full sets, since a single query may admit multiple valid keyframe sets. For frame-level comparison, we evaluate the alignment between a predicted frame $f_{\mathrm{pt}}$ and a human-annotated frame $f_{\mathrm{gt}}$ from two perspectives.

Temporal coverage measures how well the predicted frames cover the ground-truth frames along the time axis:

$$
T_{\text{cover}}(T_{\mathrm{pt}}, T_{\mathrm{gt}}) = \frac{\sum_{i=1}^{|N_{\mathrm{gt}}|} \mathbb{I}\left[\min_{j} \left|t_{\mathrm{gt}}^{i} - t_{\mathrm{pt}}^{j}\right| \leq \delta\right]}{|N_{\mathrm{gt}}|}, \tag{10}
$$

where $T_{\mathrm{pt}}$ and $T_{\mathrm{gt}}$ denote the sets of predicted and ground-truth timestamps, respectively. Here, $|N_{\mathrm{gt}}|$ is the number of ground-truth frames, $t_{\mathrm{gt}}^i$ and $t_{\mathrm{pt}}^j$ are the $i$-th ground-truth and $j$-th predicted timestamps, $\delta$ is the temporal similarity threshold defining the maximum allowed time deviation, and $\mathbb{I}[\cdot]$ is the indicator function, returning 1 if the condition holds and 0 otherwise.

Visual similarity is measured by the Structural Similarity Index (SSIM) Brunet et al. (2012), capturing structural detail, luminance, and contrast between $f_{\mathrm{pt}}$ and $f_{\mathrm{gt}}$.

For set-to-set comparison, the key challenge is defining inter-set similarity. We adopt Precision $P$ and Recall $R$ as complementary metrics: Precision checks whether each predicted frame matches some reference frame, while Recall ensures that all reference frames are represented. Given the ground-truth set $F_{\mathrm{gt}} = \{f_{\mathrm{gt}}^{j}\}_{j=1}^{n}$ and the predicted set $F_{\mathrm{pt}} = \{f_{\mathrm{pt}}^{i}\}_{i=1}^{m}$, we define the multimodal retrieval quality metrics as:

$$
\left\{
\begin{array}{l}
P(F_{\mathrm{pt}}, F_{\mathrm{gt}}) = \frac{1}{|F_{\mathrm{pt}}|} \sum_{f_{\mathrm{pt}}^{i} \in F_{\mathrm{pt}}} \max_{f_{\mathrm{gt}}^{j} \in F_{\mathrm{gt}}} \phi\left(f_{\mathrm{pt}}^{i}, f_{\mathrm{gt}}^{j}\right), \\
R(F_{\mathrm{pt}}, F_{\mathrm{gt}}) = \frac{1}{|F_{\mathrm{gt}}|} \sum_{f_{\mathrm{gt}}^{j} \in F_{\mathrm{gt}}} \max_{f_{\mathrm{pt}}^{i} \in F_{\mathrm{pt}}} \phi\left(f_{\mathrm{gt}}^{j}, f_{\mathrm{pt}}^{i}\right),
\end{array}
\right. \tag{11}
$$

where $\phi(\cdot, \cdot)$ is an extensible multimodal similarity metric function.
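Under our own helper names, and with $\phi$ left pluggable (for instance an SSIM routine such as `skimage.metrics.structural_similarity`), a minimal sketch of these metrics is:

```python
import numpy as np

def temporal_coverage(t_pt, t_gt, delta=5.0):
    """Eq. (10): fraction of ground-truth timestamps with a predicted
    timestamp within `delta` seconds. The default delta is a placeholder;
    the paper does not pin its value here."""
    t_pt, t_gt = np.asarray(t_pt, float), np.asarray(t_gt, float)
    hits = [np.min(np.abs(g - t_pt)) <= delta for g in t_gt]
    return float(np.mean(hits))

def set_precision_recall(F_pt, F_gt, phi):
    """Eq. (11): best-match precision/recall of frame sets under a
    frame-level similarity function phi(frame_a, frame_b) -> float."""
    precision = sum(max(phi(p, g) for g in F_gt) for p in F_pt) / len(F_pt)
    recall = sum(max(phi(g, p) for p in F_pt) for g in F_gt) / len(F_gt)
    return precision, recall
```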
| Method | Training Required | Matching | Iteration | TFLOPs ↓ | Search Latency (sec) ↓ | Overall Latency (sec) ↓ | Acc ↑ |
| --- | --- | --- | --- | --- | --- | --- | --- |
| *Static Frame Sampling* |  |  |  |  |  |  |  |
| UNIFORM-8 Ye et al. (2025a) | Training-Based | N/A | N/A | N/A | 0.2 | 3.8 | 53.7 |
| *Dense Retrieval* |  |  |  |  |  |  |  |
| VIDEOAGENT Fan et al. (2024) | Training-Based | CLIP-1B Radford et al. (2021) | 840 | 536.5 | 30.2 | 34.9 | 49.2 |
| T*-RETRIEVAL Ye et al. (2025b) | Training-Based | YOLO-WORLD-110M | 840 | 216.1 | 28.6 | 32.2 | 57.3 |
| *Temporal Search* |  |  |  |  |  |  |  |
| T*-ATTENTION Ye et al. (2025b) | Training-Based | N/A | N/A | 88.9 | 13.7 | 17.3 | 59.3 |
| T*-DETECTOR Ye et al. (2025b) | Training-Free | YOLO-WORLD-110M | 43 | 31.7 | 7.3 | 11.1 | 59.8 |
| VSLS (OURS)-DETECTOR | Training-Free | YOLO-WORLD-110M | 49 | 33.3 | 7.8 | 11.6 | 61.5 |
Table 1: Evaluation of performance metrics on the LV-HAYSTACK benchmark, presenting both search efficiency and end-to-end processing overhead (combining the search and inference stages).

# 3.2.2 Evaluation Metrics for Search Efficiency

Existing studies Fan et al. (2024); Park et al. (2024); Wang et al. (2024a,d); Wu and Xie (2023) have mainly concentrated on optimizing task-specific performance metrics while neglecting the computational efficiency of temporal search operations. To systematically analyze this dimension, our evaluation framework incorporates two criteria: 1) FLOPs, representing arithmetic operation complexity, and 2) Latency, recording real-world execution duration.

# 3.3 Evaluation of Search Framework Efficiency

Current approaches for keyframe selection can be broadly categorized into three paradigms: static frame sampling, dense feature retrieval-based selection, and temporal search-based methods. As shown in Table 1, while uniform sampling achieves the fastest processing speed, its ignorance of frame semantics severely limits downstream task effectiveness. Although dense feature retrieval methods attain moderate accuracy improvements (57.3%), their exhaustive frame processing demands $4.2\times$ more TFLOPs and introduces $4.5\times$ higher latency than our temporal search approach. Crucially, our method introduces four visual semantic logic detectors during temporal search while maintaining execution time comparable to the T* methods. This strategic design elevates downstream task accuracy to $61.5\%$, achieving the best performance-efficiency trade-off.

# 3.4 Visual Semantic Logic Search Performance

As demonstrated in Table 2, we evaluate VSLS on LONGVIDEOBENCH from two critical perspectives: visual similarity (measured by precision and recall) and temporal coverage. Our method achieves state-of-the-art performance across all metrics. Specifically, under the 32-frame setting, VSLS attains a precision of $74.5\%$ and a recall of $92.5\%$, outperforming all baselines in visual similarity. More notably, the temporal coverage of VSLS reaches $41.4\%$, surpassing the second-best method (T* at $36.5\%$) by $13.4\%$ in relative terms, the largest margin among all comparisons. This significant improvement highlights the effectiveness of our visual semantic logic detection modules in identifying query-relevant keyframes with both semantic alignment and temporal completeness.

These results empirically support our core hypothesis: leveraging semantic and logical cues from text queries enables precise detection of relevant video frames. Improvements in visual similarity and temporal coverage confirm that VSLS effectively captures keyframes while preserving temporal coherence through visual-logical alignment.

# 3.5 Downstream Video QA Performance

To demonstrate the advantages of VSLS, we evaluate downstream video QA performance on LONGVIDEOBENCH and VIDEO-MME. As shown in Table 3, videos are grouped by length into Short, Medium, and Long (spanning 15 s to 3600 s, i.e., up to 60 min). VSLS consistently achieves the highest accuracy in the long-video category across different frame counts and QA models. Compared to the baseline T*, incorporating our visual semantic logic relations (Figure 1) yields substantial gains.

Table 2: Search utility results on LONGVIDEOBENCH. Best scores in the 8-frame and 32-frame settings are shown in bold. Gray indicates results from the original paper.
| Method | Frame | Precision ↑ | Recall ↑ | Temporal Coverage ↑ |
| --- | --- | --- | --- | --- |
| *Static Frame Sampling Method* |  |  |  |  |
| UNIFORM Ye et al. (2025a) | 8 | 56.0 | 72.0 | 6.3 |
| UNIFORM | 8 | 60.7 | 80.4 | 4.7 |
| UNIFORM | 32 | 58.7 | 81.6 | 24.9 |
| UNIFORM | 32 | 60.2 | 85.0 | 8.1 |
| *Dense Retrieval Method* |  |  |  |  |
| VIDEOAGENT Fan et al. (2024) | 10.1 | 58.8 | 73.2 | 8.5 |
| RETRIEVAL-BASED Ye et al. (2025b) | 8 | 63.1 | 65.5 | 6.3 |
| RETRIEVAL-BASED | 32 | 59.9 | 80.8 | 21.8 |
| *Temporal Searching Method* |  |  |  |  |
| T* Ye et al. (2025b) | 8 | 58.4 | 72.7 | 7.1 |
| T* | 8 | 75.3 | 88.2 | 26.2 |
| VSLS (ours) | 8 | **75.6** | **88.6** | **26.3** |
| T* | 32 | 58.3 | 83.2 | 28.2 |
| T* | 32 | 74.0 | 90.3 | 36.5 |
| VSLS (ours) | 32 | **74.5** | **92.5** | **41.4** |
These results confirm that modeling visual-logical relations is key to effective QA on long videos.
LONGVIDEOBENCH:

| Model and Size | Frame | Long (900-3600s) | Medium (180-600s) | Short (15-60s) |
| --- | --- | --- | --- | --- |
| GPT-4o Hurst et al. (2024) | 8 | 47.1 | 49.4 | 67.3 |
| GPT-4o + T* | 8 | 49.1 | 56.2 | 68.0 |
| GPT-4o + VSLS (ours) | 8 | 51.2 | 58.9 | 74.0 |
| INTERNVL 2.5-78B Chen et al. (2024d) | 8 | 55.7 | 57.3 | 74.0 |
| INTERNVL 2.5-78B + VSLS (ours) | 8 | 58.0 | 61.5 | 74.0 |
| GPT-4o | 32 | 53.8 | 56.5 | 74.0 |
| GPT-4o + T* | 32 | 55.3 | 58.8 | 72.0 |
| GPT-4o + VSLS (ours) | 32 | 54.2 | 60.0 | 76.0 |
| LLAVA-ONEVISION-QWEN2-78B-OV | 32 | 59.3 | 63.9 | 77.4 |
| PLLAVA-34B | 32 | 49.1 | 50.8 | 66.8 |
| LLAVA-VIDEO-78B-QWEN2 | 128 | 59.3 | 63.9 | 77.4 |
| MPLUG-OWL3-7B | 128 | 53.9 | 58.8 | 73.7 |
| GPT-4o (0513) | 256 | 61.6 | 66.7 | 76.8 |

VIDEO-MME:

| Model and Size | Frame | Long (30-60 min) | Medium (4-15 min) |
| --- | --- | --- | --- |
| GPT-4o | 8 | 55.2 | 60.2 |
| GPT-4o + T* | 8 | 55.2 | 61.2 |
| GPT-4o + VSLS (ours) | 8 | 56.9 | 60.7 |
| INTERNVL 2.5-78B | 8 | 52.6 | 55.5 |
| INTERNVL 2.5-78B + VSLS (ours) | 8 | 57.7 | 57.5 |
| GPT-4o | 32 | 55.2 | 61.0 |
| GPT-4o + T* | 32 | 55.2 | 61.6 |
| GPT-4o + VSLS (ours) | 32 | 55.2 | 61.9 |
| LLaVA-OneVision-78B | 32 | 60.0 | 62.2 |
| VIDEOLLAMA 2 | 32 | 57.6 | 59.9 |
| ORYX-1.5 | 128 | 59.3 | 65.3 |
| ARIA-8x3.5B | 256 | 58.8 | 67.0 |
| GEMINI-1.5-Pro (0615) | 1/0.5 fps | 67.4 | 74.3 |
Table 3: Downstream task evaluation results on two benchmarks. All accuracy scores (%) in black are from our replication. We also cite the reported accuracy of SOTA models in gray (noting that their settings may differ and results may not be reproducible), along with the number of frames used for QA inference, for full transparency.

# 4 Analysis

# 4.1 Coverage Analysis of Semantic-Logical Relations

To ascertain the practical applicability and coverage of our defined semantic-logical relations (spatial, temporal, attribute, and causal), we analyzed their detection across all queries in the LongVideoBench and VideoMME datasets. Our findings reveal a crucial insight: for every question posed within these extensive VQA benchmarks, our query analysis module successfully identified and mapped the query to at least one of the four defined logical relation types. This empirical result supports the completeness of our proposed relation set for interpreting the semantic and logical intent inherent in these VQA tasks.

# 4.2 Time Complexity

The proposed framework consists of two stages. First, VLMs such as LLAVA-7B and GPT-4o extract a semantic set $S$ from a video $V$ with $n$ frames. $S$ includes target objects, cue objects, and their relations, with its size constrained by prompt design. In the second stage, keyframe identification is performed via a heuristic search: $k$ candidates are iteratively selected using a scoring function $h(\cdot, S)$. The score distribution $\mathrm{scores}[n]$ is dynamically refined using outputs from the YOLO-WORLD detector.

![](images/70069406c3ee25f7805367e06c6d48170b97455f4be6514bb40965b70097b46f.jpg)
Figure 4: Average occurrences of detected semantic-logical relation types per question on the VideoMME and LongVideoBench datasets. Spatial relations are the most frequently identified, while all queries in both datasets triggered at least one of the four relation types.

Our analysis focuses on YOLO-WORLD detections, the main computational bottleneck due to their reliance on deep neural networks. Reducing the number of detections improves efficiency without sacrificing accuracy. At each iteration, the detector processes $k$ selected frames to match objects and relations in $S$, yielding $k$ detections. The search stops when all targets are found or the iteration budget $\min(1000, 0.1 \times V_t)$ (with $V_t$ the video duration in seconds) is exhausted. In the worst case (e.g., videos with more than 10,000 frames and no matches), the cap is 1,000 iterations. Ideally, the evaluation function $h(\cdot, S)$ assigns high confidence to target frames, making the algorithm resemble top-$k$ selection over $n$ candidates in $\mathcal{O}(|S| \log n)$ iterations Ye et al. (2025b), resulting in an average of $\mathcal{O}(|S| k \log n)$ YOLO-WORLD inferences.

Experimental results also demonstrate that integrating relational information into the search algorithm incurs negligible computational overhead compared to the baseline T* approach. On the LV-HAYSTACK benchmark, the average iteration count increases from 42.94 (T*) to 48.82, a modest 13.69% rise in time cost.

# 4.3 Ablation Study of Four Relations

Figure 4 illustrates the distribution of the four logic relation types across the LONGVIDEOBENCH and VIDEO-MME datasets, where spatial relations predominate, followed by attribute relations.
In Table 4, we extract samples containing different relation types from LONGVIDEOBENCH to compare the object detection-based T* method with our VSLS approach. Experimental results demonstrate that VSLS achieves significant improvements in both image similarity metrics (SSIM Precision and SSIM Recall). Additionally, temporal coverage shows marked enhancement for the attribute, spatial, and causal relations (see Table 4).
| Logic Type | Method | Precision ↑ | Recall ↑ | TC ↑ |
| --- | --- | --- | --- | --- |
| Spatial | T* | 72.9 | 88.7 | 37.5 |
| Spatial | VSLS (ours) | 73.6 | 91.4 | 45.5 |
| Attribute | T* | 71.8 | 87.6 | 38.5 |
| Attribute | VSLS (ours) | 72.7 | 90.9 | 42.1 |
| Time | T* | 76.7 | 89.2 | 37.3 |
| Time | VSLS (ours) | 77.5 | 92.5 | 36.1 |
| Causal | T* | 74.7 | 92.4 | 38.6 |
| Causal | VSLS (ours) | 74.7 | 93.8 | 39.6 |
Table 4: Comparison of our method (VSLS) with the baseline across four logic relation types on LONGVIDEOBENCH. Precision: SSIM Precision; Recall: SSIM Recall; TC: Temporal Coverage.

Spatial relations exhibit the most substantial improvement (a 21.3% increase over T*). For the time relation category, we observe a slight decrease in temporal coverage, which may be attributed to the relative scarcity of time-relation samples in the dataset, limiting the opportunity to demonstrate the advantages of VSLS. Nevertheless, Figure 1 provides visual evidence of how effectively leveraging time relations can facilitate downstream question-answering tasks.

# 5 Related Work

Challenges in Long Video Understanding: Long video understanding is inherently more challenging than short-video or image-based tasks due to its rich temporal dynamics and massive redundancy Qian et al. (2024); Zeng et al. (2024); Yu et al. (2019). The large number of frames increases both memory and computational requirements, making straightforward dense sampling infeasible. Moreover, crucial events may span distant timestamps, demanding high-capacity models to capture long-range dependencies Ranasinghe et al. (2025); Shi et al. (2024); Chen et al. (2024b); Weng et al. (2024). Meanwhile, the diverse and continuous visual content introduces noise and distractors; thus, strategies to effectively locate or distill the essential parts of the video are of primary importance Zhang et al. (2023); Cheng et al. (2024b); Xu et al. (2023); Ye et al. (2025b).

Existing Solutions based on VLMs typically share three core ideas: 1) video sampling or retrieval for efficiency, 2) multi-stage or interactive reasoning to handle complex questions, and 3) compact representation to accommodate the VLM's limited context window. For instance, retrieval-based pipelines partition a video into segments and employ a learned or rule-based retriever to identify the relevant chunks before passing them to a VLM Pan et al. (2023); Choudhury et al. (2023, 2025). Other lines of research compress each frame into minimal tokens to reduce computational overhead Li et al. (2024); Chen et al. (2024a); Song et al. (2024), or adopt a streaming mechanism to propagate memory representations along the temporal axis Qian et al. (2024); Wu et al. (2022); Liu et al. (2024). Beyond these efficiency-oriented approaches, LLM/VLM-as-planner frameworks factorize the process into a series of perception queries, enabling an agent to fetch additional frame-level details when needed Wang et al. (2024b); Zhang et al. (2024); Liao et al. (2024).

# 6 Conclusion

In this paper, we present Visual Semantic-Logical Search (VSLS), a novel framework that efficiently selects semantically critical keyframes for long video understanding by decomposing the logical relationships between textual queries and visual elements. VSLS, built on four defined logical dependencies (spatial co-occurrence, temporal proximity, attribute dependency, and causal order), significantly outperforms existing methods while sampling only $1.4\%$ of video frames. The $8.7\%$ improvement in GPT-4o's long video QA accuracy demonstrates that query-guided visual semantic logic search effectively bridges the gap between textual queries and visual content. VSLS's plug-and-play nature enables seamless integration with existing pipelines, making it practical for real-world applications.
Future work could consider more logical relations, learnable search methods, enhancing interpretability, and exploring more downstream tasks. + +# References + +Dominique Brunet, Edward R. Vrscay, and Zhou Wang. On the mathematical properties of the structural similarity index. IEEE Transactions on Image Processing, 2012. +Jieneng Chen, Luoxin Ye, Ju He, Zhao-Yang Wang, Daniel Khashabi, and Alan Yuille. Llavolta: Efficient multi-modal models via stage-wise visual context compression. In arXiv preprint arXiv:2406.20092, 2024a. +Jr-Jen Chen, Yu-Chien Liao, Hsi-Che Lin, Yu-Chu Yu, Yen-Chun Chen, and Yu-Chiang Frank Wang. ReXTime: A benchmark suite for reasoning-across-time in videos. In The Thirty-eight Conference on Neural Information Processing Systems Datasets and Benchmarks Track, 2024b. +Lin Chen, Xin Wei, Jinsong Li, Xiaoyi Dong, Pan Zhang, Yuhang Zang, Zehui Chen, Haodong Duan, Zhenyu Tang, Li Yuan, et al. Sharegpt4video: Improving video understanding and generation with better captions. NeurIPS, 37:19472-19495, 2024c. +Zhe Chen, Jiannan Wu, Wenhai Wang, Weijie Su, Guo Chen, Sen Xing, Muyan Zhong, Qinglong Zhang, Xizhou Zhu, Lewei Lu, et al. Internvl: Scaling up vision foundation models and aligning for generic visual-linguistic tasks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 24185–24198, 2024d. +Tianheng Cheng, Lin Song, Yixiao Ge, Wenyu Liu, Xinggang Wang, and Ying Shan. Yolo-world: Real-time open-vocabulary object detection. CVPR, 2024a. +Zesen Cheng, Sicong Leng, Hang Zhang, Yifei Xin, Xin Li, Guanzheng Chen, Yongxin Zhu, Wenqi Zhang, Ziyang Luo, Deli Zhao, and Lidong Bing. Videollama 2: Advancing spatial-temporal modeling and audio understanding in video-llms, 2024b. +Rohan Choudhury, Koichiro Niinuma, Kris M Kitani, and Laszlo A Jeni. Zero-shot video question answering with procedural programs. arXiv preprint arXiv:2312.00937, 2023. +Rohan Choudhury, Koichiro Niinuma, Kris M. Kitani, and László A. Jeni. Video question answering with procedural programs. In ECCV, 2025. +David Cohen. Universals in linguistic theory, 1968. +Yue Fan, Xiaojian Ma, Rujie Wu, Yuntao Du, Jiaqi Li, Zhi Gao, and Qing Li. Videoagent: A memory-augmented multimodal agent for video understanding. ArXiv, abs/2403.11481, 2024. +Charles J Fillmore. The case for case. Bach and Harms (Ed.): Universals in Linguistic Theory, 1967. +Chaoyou Fu, Yuhan Dai, Yondong Luo, Lei Li, Shuhuai Ren, Renrui Zhang, Zihan Wang, Chenyu Zhou, Yunhang Shen, Mengdan Zhang, Peixian Chen, Yanwei Li, Shaohui Lin, Sirui Zhao, Ke Li, Tong Xu, Xiawu Zheng, Enhong Chen, Rongrong Ji, and Xing Sun. Video-mme: The first-ever comprehensive evaluation benchmark of multi-modal llms in video analysis. ArXiv, abs/2405.21075, 2024. +Kristen Grauman, Andrew Westbury, Eugene Byrne, Zachary Chavis, Antonino Furnari, Rohit Girdhar, Jackson Hamburger, Hao Jiang, Miao Liu, Xingyu Liu, et al. Ego4d: Around the world in 3,000 hours of egocentric video. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 18995-19012, 2022. +Aaron Hurst, Adam Lerer, Adam P Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihinda, Alan Hayes, Alec Radford, et al. Gpt-4o system card. arXiv preprint arXiv:2410.21276, 2024. +Minkuk Kim, Hyeon Bae Kim, Jinyoung Moon, Jinwoo Choi, and Seong Tae Kim. Do you remember? dense video captioning with cross-modal memory retrieval. In CVPR, 2024. +Yanwei Li, Chengyao Wang, and Jiaya Jia. 
Llama-vid: An image is worth 2 tokens in large language models. In ECCV, 2024.
Jianxin Liang, Xiaojun Meng, Yueqian Wang, Chang Liu, Qun Liu, and Dongyan Zhao. End-to-end video question answering with frame scoring mechanisms and adaptive sampling. ArXiv, abs/2407.15047, 2024.
Ruotong Liao, Max Eler, Huiyu Wang, Guangyao Zhai, Gengyuan Zhang, Yunpu Ma, and Volker Tresp. Videoinsta: Zero-shot long video understanding via informative spatial-temporal reasoning with llms. In EMNLP Findings, 2024.
Shilong Liu, Hao Cheng, Haotian Liu, Hao Zhang, Feng Li, Tianhe Ren, Xueyan Zou, Jianwei Yang, Hang Su, Jun Zhu, et al. Llava-plus: Learning to use tools for creating multimodal agents. In European Conference on Computer Vision, pages 126-142. Springer, 2024.

William C Mann and Sandra A Thompson. Rhetorical structure theory: Toward a functional theory of text organization. Text-interdisciplinary Journal for the Study of Discourse, 8(3):243-281, 1988.
Leland Gerson Neuberg. Causality: models, reasoning, and inference, by Judea Pearl, Cambridge University Press, 2000. Econometric Theory, 19(4):675-685, 2003.
Junting Pan, Ziyi Lin, Yuying Ge, Xiatian Zhu, Renrui Zhang, Yi Wang, Yu Qiao, and Hongsheng Li. Retrieving-to-answer: Zero-shot video question answering with frozen large language models. In ICCV Workshops, 2023.
Jong Sung Park, Kanchana Ranasinghe, Kumara Kahatapitiya, Wonjeong Ryoo, Donghyun Kim, and Michael S. Ryoo. Too many frames, not all useful: Efficient strategies for long-form video qa. ArXiv, abs/2406.09396, 2024.
Rui Qian, Xiaoyi Dong, Pan Zhang, Yuhang Zang, Shuangrui Ding, Dahua Lin, and Jiaqi Wang. Streaming long video understanding with large language models. In NeurIPS, 2024.
Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021.
Manjusha Rajan and Latha Parameswaran. Key frame extraction algorithm for surveillance videos using an evolutionary approach. Scientific Reports, 15(1):536, 2025.
Kanchana Ranasinghe, Xiang Li, Kumara Kahatapitiya, and Michael S Ryoo. Understanding long videos with multimodal language models. In ICLR, 2025.
Yudi Shi, Shangzhe Di, Qirui Chen, and Weidi Xie. Unlocking video-llm via agent-of-thoughts distillation. arXiv preprint arXiv:2412.01694, 2024.
Dingjie Song, Wenjun Wang, Shunian Chen, Xidong Wang, Michael Guan, and Benyou Wang. Less is more: A simple yet effective token reduction method for efficient multi-modal llms. arXiv preprint arXiv:2409.10994, 2024.
John F. Sowa. Knowledge Representation: Logical, Philosophical, and Computational Foundations. Brooks/Cole Publishing Co., Pacific Grove, CA, USA, 2000.
Leonard Talmy. Toward a Cognitive Semantics (Volume 1: Concept Structuring Systems; Volume 2: Typology and Process in Concept Structuring). MIT Press, Cambridge, MA, USA, 2000.
Reuben Tan, Xineng Sun, Ping Hu, Jui-Hsien Wang, Hanieh Deilamsalehy, Bryan A. Plummer, Bryan Russell, and Kate Saenko. Koala: Key frame-conditioned long video-llm. CVPR, 2024.
Yunlong Tang, Jing Bi, Siting Xu, Luchuan Song, Susan Liang, Teng Wang, Daoan Zhang, Jie An, Jingyang Lin, Rongyi Zhu, et al. Video understanding with large language models: A survey. arXiv preprint arXiv:2312.17432, 2023.
Hengyi Wang, Haizhou Shi, Shiwei Tan, Weiyi Qin, Wenyuan Wang, Tunyu Zhang, Akshay Nambi, Tanuja Ganu, and Hao Wang.
Multimodal needle in a haystack: Benchmarking long-context capability of multimodal large language models, 2025.
Xiaohan Wang, Yuhui Zhang, Orr Zohar, and Serena Yeung-Levy. Videoagent: Long-form video understanding with large language model as agent. In ECCV, 2024a.
Xiaohan Wang, Yuhui Zhang, Orr Zohar, and Serena Yeung-Levy. Videoagent: Long-form video understanding with large language model as agent. In ECCV, pages 58-76. Springer, 2024b.
Zhanyu Wang, Longyue Wang, Zhen Zhao, Minghao Wu, Chenyang Lyu, Huayang Li, Deng Cai, Luping Zhou, Shuming Shi, and Zhaopeng Tu. Gpt4video: A unified multimodal large language model for instruction-followed understanding and safety-aware generation. In ACM MM, pages 3907-3916, 2024c.
Ziyang Wang, Shoubin Yu, Elias Stengel-Eskin, Jaehong Yoon, Feng Cheng, Gedas Bertasius, and Mohit Bansal. Videotree: Adaptive tree-based video representation for llm reasoning on long videos. arXiv preprint arXiv:2405.19209, 2024d.
Ziyang Wang, Shoubin Yu, Elias Stengel-Eskin, Jaehong Yoon, Feng Cheng, Gedas Bertasius, and Mohit Bansal. Videotree: Adaptive tree-based video representation for llm reasoning on long videos. arXiv preprint arXiv:2405.19209, 2024e.
Yuetian Weng, Mingfei Han, Haoyu He, Xiaojun Chang, and Bohan Zhuang. Longvlm: Efficient long video understanding via large language models. In European Conference on Computer Vision, pages 453-470. Springer, 2024.
Chao-Yuan Wu, Yanghao Li, Karttikeya Mangalam, Haoqi Fan, Bo Xiong, Jitendra Malik, and Christoph Feichtenhofer. Memvit: Memory-augmented multiscale vision transformer for efficient long-term video recognition. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 13587-13597, 2022.
Penghao Wu and Saining Xie. V*: Guided visual search as a core mechanism in multimodal llms. In CVPR, 2023.
Jiaqi Xu, Cuiling Lan, Wenxuan Xie, Xuejin Chen, and Yan Lu. Retrieval-based video language model for efficient long video question answering. arXiv preprint arXiv:2312.04931, 2023.
Jinhui Ye, Zihan Wang, and Haosen Sun. Longvideohaystack. https://huggingface.co/datasets/LVHaystack/LongVideoHaystack, 2025a. v1.0.
Jinhui Ye, Zihan Wang, Haosen Sun, Keshigeyan Chandrasegaran, Zane Durante, Cristobal Eyzaguirre, Yonatan Bisk, Juan Carlos Niebles, Ehsan Adeli, Li Fei-Fei, Jiajun Wu, and Manling Li. Re-thinking temporal search for long-form video understanding. In CVPR, 2025b.
Shukang Yin, Chaoyou Fu, Sirui Zhao, Ke Li, Xing Sun, Tong Xu, and Enhong Chen. A survey on multimodal large language models. National Science Review, 2024.
Sicheng Yu, Chengkai Jin, Huan Wang, Zhenghao Chen, Sheng Jin, Zhongrong Zuo, Xiaolei Xu, Zhenbang Sun, Bingni Zhang, Jiawei Wu, Hao Zhang, and Qianru Sun. Frame-voyager: Learning to query frames for video large language models. arXiv preprint arXiv:2410.03226, 2024.
Zhou Yu, Dejing Xu, Jun Yu, Ting Yu, Zhou Zhao, Yueting Zhuang, and Dacheng Tao. Activitynet-qa: A dataset for understanding complex web videos via question answering. In AAAI, 2019.
Xiangyu Zeng, Kunchang Li, Chenting Wang, Xinhao Li, Tianxiang Jiang, Ziang Yan, Songze Li, Yansong Shi, Zhengrong Yue, Yi Wang, Yali Wang, Yu Qiao, and Limin Wang. Timesuite: Improving mllms for long video understanding via grounded tuning, 2024.
Hang Zhang, Xin Li, and Lidong Bing. Video-llama: An instruction-tuned audio-visual language model for video understanding. In EMNLP, 2023.
Lu Zhang, Tiancheng Zhao, Heting Ying, Yibo Ma, and Kyusong Lee.
OmAgent: A multi-modal agent framework for complex video understanding with task divide-and-conquer. In EMNLP, 2024.
Zijia Zhao, Haoyu Lu, Yuqi Huo, Yifan Du, Tongtian Yue, Longteng Guo, Bingning Wang, Weipeng Chen, and Jing Liu. Needle in a video haystack: A scalable synthetic evaluator for video mllms. arXiv preprint arXiv:2406.09367, 2024.
Heqing Zou, Tianze Luo, Guiyang Xie, Fengmao Lv, Guangcong Wang, Junyang Chen, Zhuochen Wang, Hansheng Zhang, Huajian Zhang, et al. From seconds to hours: Reviewing multimodal large language models on comprehensive long video understanding. arXiv preprint arXiv:2409.18938, 2024.

# Part I

# Appendix

# Table of Contents

A Theoretical Underpinnings of Relation Categories

A.1 Linguistic Grounding
A.2 Logical Grounding
A.3 Pragmatic Completeness for VQA

B Performance
C Analysis of the Impact of Search Frame Count

D Details of Datasets

D.1 Details of VIDEO-MME
D.2 Details of LONGVIDEOBENCH
D.3 Details of LV-HAYSTACK
D.4 Details of EGO-4D

E Detailed Algorithm

E.1 Algorithm Overview and Core Components
E.2 Implementation Considerations
E.3 Computational Complexity Analysis
E.4 Technical Implementation Details
E.5 Practical Application Examples
E.6 System Specifications for Reproducibility

F Case Study of VSLS Keyframe Selection
G Iteration Analysis

H Prompt

H.1 Prompt Template for Query Grounding
H.2 Prompt Template for Question Answering

I Limitations

J Broader Impacts

J.1 Positive Impacts
J.2 Potential Considerations

# A Theoretical Underpinnings of Relation Categories

Our choice of the four relation categories—spatial, temporal, attribute, and causal—is grounded in foundational concepts from linguistics and logic. While achieving absolute "completeness" in describing the infinite complexity of the real world is a formidable challenge, this selection aims to capture the core aspects of events and states and the way humans conceptualize and communicate them.

# A.1 Linguistic Grounding

Semantic Roles and Case Grammar: Theories like Fillmore's Case Grammar (Fillmore, 1967) analyze sentences in terms of the semantic roles that nominals play in relation to the verb (the event).

- Spatial relations directly correspond to roles like Locative (the location of an event or state) or Path (the trajectory of motion).
- Temporal relations align with Temporal roles, specifying when an event occurs or its duration.
- Attributes describe the properties of entities (participants) involved in these roles. While not direct case roles for verbs, they are fundamental for identifying and characterizing the "who" and "what" (e.g., Agent, Patient, Theme, Instrument) that possess these attributes during an event.
- Causal relations are central to understanding agency and event structure. Roles like Agent (the instigator of an action) or Cause (the non-volitional trigger of an event) highlight the importance of causality in linguistic descriptions of events.

Lexical Semantics and Event Structure: Work in lexical semantics (e.g., following Pustejovsky on the generative lexicon (Cohen, 1968), or Talmy (2000) on cognitive semantics) often decomposes event meaning into fundamental components. Talmy (2000), for instance, extensively discusses how language structures concepts like space, time, and force dynamics (which inherently relate to causality).
Events are situated in space and time, involve entities with specific attributes, and are often linked through causal chains (e.g., one action causing another, or an agent causing a change of state).

Discourse Relations: Theories like Rhetorical Structure Theory (RST; Mann and Thompson, 1988) identify relations that bind textual units together. Many of these fundamental relations are inherently temporal (e.g., Sequence), causal (e.g., Cause, Result, Purpose), or involve describing entities and their settings (which encompasses spatial and attributive information, often under relations like Elaboration or Background). This suggests that these four categories capture essential elements for constructing coherent descriptions and explanations, a core function of Video Question Answering (VQA).

# A.2 Logical Grounding

Predicate Logic and Knowledge Representation: In formal logic and AI knowledge representation (e.g., Sowa (2000)), events and states are often represented using predicates with arguments that specify participants, locations, times, and properties. A typical event representation might implicitly or explicitly include Location(event, place), Time(event, time_interval), HasProperty(event, attribute_value), and relations like Causes(event1, event2). Our four categories provide a high-level abstraction over these common predicate types.

Modal and Specialized Logics:

- Temporal Logic is specifically designed to reason about propositions qualified in terms of time.
- Spatial Logic deals with reasoning about spatial properties and relations between entities.
- Logics of Action and Causality (e.g., situation calculus, event calculus, or Pearl's work on causality (Neuberg, 2003)) explicitly model how actions bring about changes and the causal dependencies between events.

# A.3 Pragmatic Completeness for VQA

From a pragmatic standpoint, particularly for VQA, these four relations address the core "Wh-questions" humans often ask to understand a scene or event:

- What/Who? (Identifies objects/entities, often distinguished by their attributes)
- Where? (Answered by spatial relations)
- When? (Answered by temporal relations)
- Why/How did it happen? (Often answered by causal relations or a sequence of events linked temporally and spatially)

While more fine-grained relations (as in Action Genome) undoubtedly provide deeper semantic detail, our chosen set aims to provide a foundational, yet computationally manageable, framework for keyframe selection based on the most common semantic and logical inferences required for a broad range of video queries. They represent a level of abstraction that is both meaningful for human queries and feasible for current visual-language models to parse and verify.

In essence, these categories are not arbitrary but reflect fundamental dimensions along which events and states are structured, perceived, and communicated in language and reasoned about in logic. We believe they offer a robust and broadly applicable framework for the task at hand.

# B Performance

Long-form video understanding presents unique challenges due to the complexity of temporal dynamics and cross-modal interactions over extended durations (900-3,600 seconds). Our comprehensive evaluation on the LVB-XL benchmark reveals significant performance gaps among existing approaches.
While large-scale models like GPT-4o (32 frames) and INTERNVL 2.5-78B (16 frames) have demonstrated competence in short-video tasks, their direct application to long-form content yields suboptimal results (53.8% and 56.5% accuracy, respectively).

Our Visual Semantic-Logical Search (VSLS) framework addresses these limitations. It enables consistent performance improvements across different architecture scales, elevating GPT-4o to $54.2\%$ $(+0.4\mathrm{pp})$ and achieving a remarkable $62.4\%$ $(+5.9\mathrm{pp})$ for INTERNVL 2.5-78B on this benchmark. The comparative analysis further suggests that VSLS's gains become particularly pronounced when processing longer visual sequences, highlighting its effectiveness in modeling extended temporal contexts.

# C Analysis of the Impact of Search Frame Count

![](images/1c05698f5ebb877c71c8dc80fe7f27060313591cbce3eed88b06bbac45cee230.jpg)
Figure 5: Performance improvement with increasing search frames. VSLS consistently enhances accuracy and reaches near-human oracle performance at 64 frames.

This section investigates the impact of the number of search frames on the performance of Vision-Language Models (VLMs) on LONGVIDEOBENCH.

Figure 5 empirically demonstrates the non-monotonic relationship between input frame quantity and model accuracy on the LONGVIDEOBENCH XL benchmark, following the evaluation setting of the T* framework study. Through systematic experimentation across 18 state-of-the-art VLMs, this visualization reveals a critical phenomenon: excessive frame inputs degrade performance for models lacking temporal redundancy mitigation mechanisms.

# D Details of Datasets

# D.1 Details of VIDEO-MME

The VIDEO-MME (Video Multi-Modal Evaluation) dataset represents the first comprehensive benchmark tailored to assess the capabilities of Vision-Language Models (VLMs) in video understanding. Aiming to address limitations in existing benchmarks, it emphasizes diversity, temporal complexity, and multi-modal integration while ensuring high-quality human annotations. The dataset contains 900 carefully curated videos across six primary domains—Knowledge, Film and Television, Sports Competition, Artistic Performance, Life Record, and Multilingual—with 30 fine-grained subcategories such as astronomy, esports, and documentaries. These videos vary significantly in duration, ranging from short clips (11 seconds) to long-form content (up to 1 hour), enabling robust evaluation across temporal scales.

Each video is paired with expert-annotated multiple-choice questions (2,700 QA pairs in total), rigorously validated to ensure clarity and reliance on visual or multi-modal context. Questions span 12 task types, including action recognition, temporal reasoning, and domain-specific knowledge, with a focus on scenarios where answers cannot be inferred from text alone. To quantify temporal complexity, the dataset introduces certificate length analysis, revealing that answering questions often requires understanding extended video segments (e.g., median lengths of 26 seconds for short videos and 890.7 seconds for long videos), surpassing the demands of prior benchmarks like EGOSchema.

VIDEO-MME serves as a universal benchmark, applicable to both image- and video-focused MLLMs, and exposes key challenges for future research. These include improving architectures for long-sequence processing, developing datasets for complex temporal reasoning, and enhancing cross-modal alignment.
By providing a rigorous evaluation framework, VIDEO-MME aims to drive progress toward MLLMs capable of understanding dynamic, real-world scenarios.

# D.2 Details of LONGVIDEOBENCH

The LONGVIDEOBENCH benchmark pioneers the evaluation of long-context interleaved video-language understanding in VLMs, addressing critical gaps in existing benchmarks through its focus on detailed retrieval and temporal reasoning over hour-long multimodal inputs. Designed to overcome the "single-frame bias" prevalent in prior video benchmarks, its novel referring reasoning paradigm enables models to locate and analyze specific contexts within extended sequences. The dataset comprises 3,763 web-sourced videos that span various themes - movies, news, life vlogs, and knowledge domains (including art, history, and STEM) - with durations progressively grouped into four levels: 8-15 seconds, 15-60 seconds, 3-10 minutes, and 15-60 minutes. Each video is paired with aligned subtitles, forming interleaved multimodal inputs that mimic real-world viewing scenarios.

The benchmark features 6,678 human-annotated multiple-choice questions categorized into 17 fine-grained task types across two levels: Perception (requiring object/attribute recognition in single scenes) and Relation (demanding temporal/causal reasoning across multiple scenes). Questions incorporate explicit referring queries (e.g., "When the woman descends the rocky hill...") that anchor reasoning to specific video moments, with an average question length of 43.5 words to ensure precision. Temporal complexity is quantified through duration-grouped analysis, where models must process up to 256 frames (at 1 fps) for hour-long videos, significantly exceeding the demands of predecessors like EGOSchema (180s videos).

# D.3 Details of LV-HAYSTACK

The LV-HAYSTACK benchmark establishes the first comprehensive evaluation framework for temporal search in long-form video understanding, addressing critical limitations in existing synthetic needle-in-haystack benchmarks through real-world video annotations and multi-dimensional evaluation metrics. Designed to assess models' ability to locate minimal keyframe sets (typically 1-5 frames) from hour-long videos containing tens of thousands of frames, the dataset comprises 3,874 human-annotated instances spanning 150 hours of video content across two distinct categories: egocentric videos from EGO4D (101 hours) and allocentric videos from LONGVIDEOBENCH (57.7 hours).

Organized into HAYSTACK-EGO4D and HAYSTACK-LVBENCH subsets, the benchmark features videos averaging 24.8 minutes in length (max 60 minutes) with 44,717 frames per video. Each instance contains:

- Expert-curated multi-choice questions requiring temporal reasoning (15.9 questions/video);
- Human-annotated keyframe sets (4.7 frames/question for egocentric, 1.8 frames/question for allocentric);
- Temporal and visual similarity metrics for precise search evaluation.

# D.4 Details of EGO-4D

The EGO4D (Egocentric Computer Vision Benchmark) dataset establishes a transformative foundation for advancing research in first-person visual perception through unprecedented scale, diversity, and multi-modal integration. Designed to overcome limitations in existing egocentric datasets, it captures 3,670 hours of unscripted daily activities from 931 participants across 74 global locations and 9 countries, spanning household, workplace, leisure, and outdoor scenarios.
The dataset features $30+$ fine-grained activity categories, including carpentry, social gaming, and meal preparation, with videos ranging from brief interactions (8-minute clips) to extended continuous recordings (up to 10 hours), enabling comprehensive analysis of long-term behavioral patterns.

Each video is enriched with multi-modal annotations totaling 3.85 million dense textual narrations (13.2 sentences/minute), coupled with 3D environment meshes, eye gaze tracking, stereo vision, and synchronized multi-camera views. Rigorous privacy protocols ensure ethical data collection, with 612 hours containing unblurred faces/audio for social interaction studies. The benchmark suite introduces five core tasks organized across temporal dimensions:

- Episodic Memory: Temporal localization of natural language queries (74K instances) and 3D object tracking using Matterport scans;
- Hand-Object Interaction: State change detection (1.3M annotations) with PNR (point-of-no-return) temporal localization;
- Social Understanding: Audio-visual diarization (2,535h audio) and gaze-directed communication analysis;
- Action Forecasting: Anticipation of locomotion trajectories and object interactions.

Quantitative analysis reveals the dataset's complexity: hand-object interactions involve 1,772 unique verbs and 4,336 nouns, while social scenarios contain 6.8 participant interactions per minute on average. Multi-modal fusion experiments demonstrate performance gains, with 3D environment context improving object localization accuracy by $18.7\%$ compared to RGB-only baselines. State-of-the-art models achieve $68.9\%$ accuracy in action anticipation tasks, yet struggle with long-term forecasting (41.2% accuracy for 5s predictions), highlighting critical challenges in temporal reasoning.

EGO4D's unique integration of egocentric video with complementary modalities (IMU data in 836h, gaze tracking in 45h) enables novel research directions in embodied AI and augmented reality. The dataset exposes fundamental limitations in current architectures, particularly in processing hour-long video contexts and synthesizing cross-modal signals—only $23\%$ of tested models effectively utilized audio-visual synchronization cues. By providing standardized evaluation protocols and curated challenge subsets, EGO4D serves as a universal testbed for developing perceptive systems capable of understanding persistent 3D environments and complex human behaviors.

# E Detailed Algorithm

The detailed VSLS algorithm is presented in Algorithm 2.

# E.1 Algorithm Overview and Core Components

The algorithm operates as an adaptive search framework that intelligently explores video content (represented as the set $V$) to locate frames matching the semantic-logical query requirements ($Q$). Unlike traditional linear search methods, it employs a probabilistic sampling strategy that dynamically adjusts based on confidence scores from multiple relationship types.

Algorithm 2: The complete Visual Semantic-Logical Search

Function SemanticLogicalTemporalSearch($V$, $Q$, $K$, $\Delta_t$, $\tau$, $\alpha$, $\gamma$):
  $\mathcal{O}, \mathcal{R} \gets$ ParseQuestion($Q$); // Extract key/cue objects and relationships
  $P \gets$ Uniform, $B \gets |V|$, $S \gets \emptyset$, $N_v \gets |V|$; // Initialize distribution and state
  while $B > 0$ and $|\mathcal{O}| > 0$ do
    $k \gets \lfloor \sqrt{B} \rfloor$, $G \gets$ Grid(Sample($P$, $k^2$)); // Adaptive grid sampling
    $\Omega \gets$ DetectObjects($G$); // Detect objects in sampled frames
    foreach $g \in G$ do
      $C_g \gets$ CalculateBaseScore($\Omega[g]$); // Base detection confidence
      foreach $r \in \mathcal{R}$ do
        if r.type $=$ Spatial then $C_g \gets C_g + \alpha \gamma_{\mathrm{spatial}} \cdot$ CheckSpatialRelationship($r$, $\Omega[g]$)
        else if r.type $=$ Temporal then $C_g \gets C_g + \alpha \gamma_{\mathrm{time}} \cdot$ CheckTemporalRelationship($r$, $\Omega$, $\Delta_t$)
        else if r.type $=$ Causal then $C_g \gets C_g + \alpha \gamma_{\mathrm{causal}} \cdot$ CheckCausalRelationship($r$, $\Omega$)
        else if r.type $=$ Attribute then $C_g \gets C_g + \alpha \gamma_{\mathrm{attr}} \cdot$ CheckAttributeRelationship($r$, $\Omega[g]$, $\tau$)
      UpdateScores($S$, $g$, $C_g$); // Update global score registry
    DiffuseScores($S$, $w$); // Temporal context propagation
    $P \gets$ NormalizeDistribution($S$), $B \gets B - k^{2}$; // Update sampling distribution
    foreach $g \in \operatorname{TopK}(S, K)$ do
      if $\Omega[g] \cap \mathcal{O} \neq \emptyset$ then $\mathcal{O} \gets \mathcal{O} \setminus \Omega[g]$; // Remove identified key objects
  return TopK($S$, $K$); // Return top-K keyframes

**Initialization Phase** The process begins by parsing the input query $Q$ into two fundamental components:

- $\mathcal{O}$: A set of key objects or entities to identify
- $\mathcal{R}$: A collection of relationships (spatial, temporal, causal, and attribute) that must be satisfied

The algorithm initializes a uniform probability distribution ($P$) across all video frames, establishes a budget ($B$) equal to the total number of frames ($|V|$), and creates an empty score registry ($S$) to track confidence values. This ensures unbiased initial exploration before evidence-guided refinement.

**Adaptive Sampling Strategy** Rather than exhaustively processing every frame, the algorithm employs a square-root scaling sampling strategy in which $k = \lfloor \sqrt{B} \rfloor$ determines the sampling density. This provides a mathematical balance between exploration breadth and computational efficiency. The Grid function organizes sampled frames into a structured representation that preserves spatial-temporal relationships, facilitating subsequent relationship analysis.

**Multi-modal Object Detection** The DetectObjects function applies state-of-the-art computer vision techniques to identify objects within each sampled frame. This step leverages deep neural networks pre-trained on diverse visual datasets, enabling recognition of a wide range of entities together with their confidence scores and spatial locations within frames.

**Score Propagation and Distribution Update** The DiffuseScores function implements a temporal context propagation mechanism that spreads confidence values to neighboring frames, acknowledging that relevant content likely extends beyond individual frames. This diffusion creates a smoothed confidence landscape that guides subsequent sampling.
After each iteration, the algorithm normalizes the accumulated scores to form an updated probability distribution, focusing future sampling on promising regions while maintaining exploration potential in unexamined areas.

**Convergence Criteria and Termination** The search continues until either:

- the sampling budget ($B$) is exhausted, indicating comprehensive coverage of the video content, or
- all target objects ($\mathcal{O}$) have been identified at satisfactory confidence levels.

This dual-termination approach balances thoroughness with efficiency, preventing unnecessary computation once the objectives are met.

**Result Generation** The algorithm concludes by returning the top-K frames with the highest confidence scores, representing the most relevant video segments that satisfy the semantic-logical query requirements. These keyframes provide a concise summary of the content matching the user's information needs.

# E.2 Implementation Considerations

The algorithm's performance depends on several configurable parameters:

- $\Delta_{t}$: Temporal window size for relationship analysis
- $\tau$: Confidence threshold for attribute matching
- $\alpha$: Global relationship influence factor
- $\gamma$: Type-specific relationship weights

These parameters can be tuned based on application requirements, video characteristics, and computational constraints. The algorithm's modular design allows for straightforward substitution of specific component implementations (e.g., different object detectors or relationship checkers) without altering the overall framework.

# E.3 Computational Complexity Analysis

The time complexity scales as $O(\sqrt{N})$, where $N$ is the total number of frames, significantly improving upon linear approaches. Space complexity remains $O(N)$ to maintain the probability distribution and score registry. The algorithm balances exploration and exploitation through its adaptive sampling approach, making it particularly suitable for large-scale video analysis tasks where exhaustive processing would be prohibitive.

# E.4 Technical Implementation Details

**Object Detection and Feature Extraction** To achieve real-time performance, the object detection module utilizes pre-trained deep convolutional neural network architectures, particularly variants based on the Fast R-CNN and YOLO series. The system employs a two-stage detection strategy:

- Preliminary Detection: using lightweight models to rapidly identify potential regions;
- Fine-grained Classification: applying more sophisticated models for detailed classification of high-confidence regions.

The feature extraction process leverages self-attention mechanisms from Vision Transformers (ViT), generating rich semantic embeddings robust to visual variations such as scale, rotation, and illumination. Each identified object is associated with a feature vector $f_{i} \in \mathbb{R}^{d}$, where $d = 512$ is the dimensionality of the embedding space.
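To make the control flow of Algorithm 2 concrete, the following is a minimal Python sketch of the square-root budget sampling loop together with the Gaussian score diffusion defined in Eqs. (17)-(18) below. It is an illustration under simplifying assumptions, not our released implementation: `score_frame` is a hypothetical stand-in for the object detection and four relationship checks, and the early exit once all key objects are found is omitted.

```python
import math
import random
from collections import defaultdict

def semantic_logical_search(num_frames, score_frame, budget, top_k=8, sigma=2.0):
    """Minimal sketch of the sqrt-budget sampling loop in Algorithm 2.

    score_frame(idx) is a hypothetical stand-in for object detection plus
    the four relationship checks; it returns a confidence in [0, 1].
    """
    scores = defaultdict(float)                 # global score registry S
    probs = [1.0 / num_frames] * num_frames     # uniform prior P

    while budget > 0:
        k = max(1, math.isqrt(budget))          # k = floor(sqrt(B))
        batch = random.choices(range(num_frames), weights=probs, k=k * k)
        for idx in set(batch):                  # score each sampled frame once
            scores[idx] = max(scores[idx], score_frame(idx))

        # Diffuse scores to temporal neighbours with Gaussian weights (Eqs. 17-18)
        diffused = defaultdict(float)
        for idx, s in scores.items():
            for j in range(max(0, idx - 3), min(num_frames, idx + 4)):
                diffused[j] += s * math.exp(-((j - idx) ** 2) / (2 * sigma ** 2))

        total = sum(diffused.values())
        if total > 0:                           # renormalize into the new P
            probs = [diffused[i] / total for i in range(num_frames)]
        budget -= k * k                         # B <- B - k^2

    return sorted(scores, key=scores.get, reverse=True)[:top_k]
```

Note that the budget is exposed as a parameter here: in practice it would be set well below $|V|$ (the paper reports sampling only $1.4\%$ of frames on average), so it should be read as a tunable cap rather than the full frame count.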
**Mathematical Formulations for Relationship Assessment** The evaluation of the various relationship types is based on precise mathematical definitions.

**Spatial Relationships** Given bounding boxes $B_{i} = (x_{i},y_{i},w_{i},h_{i})$ and $B_{j} = (x_{j},y_{j},w_{j},h_{j})$ for two objects, the confidence for a spatial relationship $r$ is calculated as

$$
C_{\text{spatial}}(B_i, B_j, r) = \phi_r(B_i, B_j) \cdot \psi(B_i) \cdot \psi(B_j), \tag{12}
$$

where $\phi_r$ is a relationship-specific compatibility function and $\psi$ is the object detection confidence. For example, the compatibility for a "contains" relationship is defined as

$$
\phi_{\text{contains}}(B_i, B_j) = \frac{\operatorname{IoU}(B_i, B_j)}{\operatorname{Area}(B_j)}. \tag{13}
$$

**Temporal Relationships** Temporal relationships are calculated by evaluating object behavior patterns across a sequence of frames $\{F_t, F_{t+1}, \dots, F_{t+\Delta_t}\}$:

$$
C_{\text{temporal}}(O_i, O_j, r, \Delta_t) = \prod_{k=0}^{\Delta_t - 1} T_r\left(O_i^{t+k}, O_j^{t+k+1}\right), \tag{14}
$$

where $T_{r}$ is a relationship-specific temporal transition matrix and $O_{i}^{t}$ denotes the state of object $i$ at time $t$.

**Causal Relationships** Causal relationships utilize a Bayesian network framework to compute conditional probabilities:

$$
C_{\text{causal}}(E_i, E_j) = P(E_j \mid E_i) \cdot \log \frac{P(E_j \mid E_i)}{P(E_j)}, \tag{15}
$$

where $E_{i}$ and $E_{j}$ denote the presumed cause and effect events, respectively.

**Attribute Relationships** Attribute evaluation employs cosine similarity between feature vectors and attribute prototypes:

$$
C_{\text{attr}}(O_i, a) = \max\left(0, \cos(f_i, p_a) - \tau\right), \tag{16}
$$

where $p_a$ is the prototype vector for attribute $a$ and $\tau$ is the minimum similarity threshold.

**Score Propagation Algorithm** Temporal score propagation is implemented through a weighted diffusion process, analogous to heat diffusion on a graph structure:

$$
S'(t) = S(t) + \sum_{k \in \mathcal{N}(t)} w_{k,t} \cdot S(k), \tag{17}
$$

where $\mathcal{N}(t)$ is the temporal neighborhood of frame $t$ and $w_{k,t}$ is a weight based on temporal distance, defined as

$$
w_{k,t} = \exp\left(-\frac{|k - t|^{2}}{2 \sigma^{2}}\right), \tag{18}
$$

where $\sigma$ controls the diffusion range.

**Adaptive Sampling Optimization** The sampling strategy is further improved through a dynamically adjusted Thompson sampling method, modeling the probability distribution $P$ as a Beta distribution whose shape parameters are updated from previous observations:

$$
P(t) \sim \operatorname{Beta}\left(\alpha_t + \sum_i S_i(t),\; \beta_t + n - \sum_i S_i(t)\right), \tag{19}
$$

where $\alpha_{t}$ and $\beta_{t}$ are prior hyperparameters and $n$ is the total number of observations.
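As a concrete reading of Eqs. (12), (13), and (16), the snippet below scores a "contains" spatial relationship and an attribute match. The (x, y, w, h) box format, the helper names, and the default threshold are illustrative assumptions on our part, not the released code.

```python
import numpy as np

def iou(bi, bj):
    """Intersection-over-union of two (x, y, w, h) boxes."""
    ax1, ay1, ax2, ay2 = bi[0], bi[1], bi[0] + bi[2], bi[1] + bi[3]
    bx1, by1, bx2, by2 = bj[0], bj[1], bj[0] + bj[2], bj[1] + bj[3]
    iw = max(0.0, min(ax2, bx2) - max(ax1, bx1))
    ih = max(0.0, min(ay2, by2) - max(ay1, by1))
    inter = iw * ih
    union = bi[2] * bi[3] + bj[2] * bj[3] - inter
    return inter / union if union > 0 else 0.0

def spatial_confidence(bi, bj, conf_i, conf_j):
    """C_spatial = phi_contains(B_i, B_j) * psi(B_i) * psi(B_j), Eqs. (12)-(13)."""
    area_j = bj[2] * bj[3]
    phi = iou(bi, bj) / area_j if area_j > 0 else 0.0  # Eq. (13) as published
    return phi * conf_i * conf_j

def attribute_confidence(feature, prototype, tau=0.3):
    """C_attr = max(0, cos(f_i, p_a) - tau), Eq. (16)."""
    cos = float(np.dot(feature, prototype) /
                (np.linalg.norm(feature) * np.linalg.norm(prototype) + 1e-8))
    return max(0.0, cos - tau)
```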
# E.5 Practical Application Examples

In practical visual search scenarios, the algorithm processes complex queries such as "a person wearing a blue shirt sits down at a table and then picks up a coffee cup":

- Query parsing identifies the key objects (person, shirt, table, coffee cup) and relationships (blue attribute, sitting action, temporal before-after relation, spatial proximity);
- Adaptive sampling selects representative frames from the video;
- Multi-relationship evaluation integrates the various sources of evidence;
- Score propagation establishes a unified confidence landscape across related frame sets;
- Result generation provides a concise summary of the most relevant segments in the video.

This semantic-logical-temporal search framework represents a significant advancement in multimodal content retrieval, enabling natural language queries that incorporate complex relationships across objects, time, and causal chains.

# E.6 System Specifications for Reproducibility

Our experiments were conducted on high-performance servers, each equipped with either an Intel(R) Xeon(R) Platinum 8378A CPU @ 3.00GHz or an Intel(R) Xeon(R) Platinum 8358P CPU @ 2.60GHz, 1TB of RAM, and either four or six NVIDIA A800 GPUs with 80GB of memory each. Machines with four GPUs use the SXM4 variant, while those with six GPUs use the PCIe variant. The software environment included Python 3.11, PyTorch 2.4, and NCCL 2.21.5 for multi-GPU communication.

# F Case Study of VSLS Keyframe Selection

![](images/94d01838d210618c918d4b0d5acc496b6d56923e612c4a95d392bb84b400ab9a.jpg)
Figure 6: Qualitative comparison of frame selection strategies demonstrates VSLS's ability to pinpoint query-critical moments (e.g., the subject presenting pink objects) with temporal precision, while baseline approaches exhibit color misinterpretation (brown) due to suboptimal frame choices. VSLS maintains superior temporal diversity and content relevance, effectively avoiding the redundant selections observed in comparative methods.

As shown in Figure 6, the VSLS framework demonstrates its effectiveness through a video question-answering case study involving temporal handwriting analysis. The experiment focuses on distinguishing between two sequential events: a brown pen writing "guitar" at 2 seconds and a pink pen rewriting the same word at 3 seconds, with the query requiring identification of the pen color in the second occurrence.

VSLS's analytical process unfolds in three interpretable phases:

- Semantic Logic Extraction: identifies the core visual entities (handwritten text, pen, paper) and constructs temporal relationships through the triplet formulation (text, time, pen), establishing the framework for tracking writing-instrument changes;
- Temporal Relevance Scoring: the gray relevance curve reveals precise temporal localization, with peak scores aligning with the ground-truth positions at 2s and 3s, contrasting sharply with the baseline methods' random fluctuations;
- Search Pattern Visualization: demonstrates VSLS's focused inspection near critical moments versus uniform sampling's scattered temporal coverage, explaining the baseline's failure to detect the pink pen.

This case study yields two critical insights about VSLS's temporal reasoning:

- Sequential Event Disambiguation: the system successfully differentiates between near-identical visual events through:
  - First writing instance: brown pen detection (a false positive for the query);
  - Second writing instance: pink pen detection (the true positive).
- Explanation of the answer disparity: VSLS produces the correct answer ("Pink"), whereas uniform sampling yields the erroneous answer ("Brown") due to its temporal reasoning failures.

The spatial-temporal alignment between relevance peaks and ground-truth positions confirms VSLS's capacity to synchronize semantic logic with the flow of visual evidence. This case particularly highlights the method's superiority in scenarios requiring precise discrimination of recurrent events with subtle visual variations.

# G Iteration Analysis

![](images/a12bb641ca82972e0549859e62e742682b2319ef656a04a9604927b07e64f4ec.jpg)
Figure 7: The comparative visualization of iteration counts on the medium-length video subset of the VIDEO-MME dataset demonstrates that our method consistently requires more iterations than the T* approach.

As shown in Figure 7, incorporating relations into the search algorithm increases the average number of iterations for medium-length videos in the VIDEO-MME dataset from 15.9 to 23.8, while the overall shape of the iteration distribution remains largely unchanged.

# H Prompt

# H.1 Prompt Template for Query Grounding

Here is the prompt we used for query grounding.

# Prompt Template for Query Grounding

Analyze the following video frames and the question:

Question: <question>

Options: <options>

Step 1: Key Object Identification

- Extract 3-5 core objects detectable by computer vision
- Use YOLO-compatible noun phrases (e.g., "person", "mic")
- Format: Key Objects: obj1, obj2, obj3

Step 2: Contextual Cues

- List 2-4 scene elements that help locate key objects based on options provided
- Use detectable items (avoid abstract concepts)
- Format: Cue Objects: cue1, cue2, cue3

Step 3: Relationship Triplets

- Relationship types:
  - Spatial: Objects must appear in the same frame
  - Attribute: Color/size/material descriptions (e.g., "red clothes", "large")
  - Time: Appear in different frames within a few seconds
  - Causal: There is a temporal order between the objects
- Format of Relations: (object, relation_type, object), relation_type should be exactly one of spatial/attribute/time/causal

# Output Rules

1. One line each for Key Objects/Cue Objects/Rel starting with exact prefixes
2. Separate items with comma except for triplets where items are separated by semicolon
3. Never use markdown or natural language explanations
4. If you cannot identify any key objects or cue objects from the video provided, please just identify the possible key or cue objects from the question and options provided

# Below is an example of the procedure:

Question: For "When does the person in red clothes appear with the dog?"

Response:

Key Objects: person, dog, red clothes
Cue Objects: grassy_area, leash, fence
Rel: (person; attribute; red clothes), (person; spatial; dog)

# Format your response EXACTLY like this in three lines:

Key Objects: object1, object2, object3

Cue Objects: object1, object2, object3

Rel: (object1; relation_type1; object2), (object3; relation_type2; object4)

# H.2 Prompt Template for Question Answering

Here is the prompt we used for question answering.

# Prompt Template for Question Answering

Select the best answer to the following multiple-choice question based on the video.

Question: <question>

Options: <options>

Answer with the option's letter from the given choices directly.

Your response format should be strictly an upper case letter A, B, C, D or E.
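Because the H.1 template forces a rigid three-line reply, the grounding output can be machine-parsed with a few string operations before it is handed to Algorithm 2. The sketch below assumes the model honours that format exactly; the function name and the silent skipping of malformed triplets are our own choices rather than part of the released code.

```python
import re

def parse_grounding_response(text):
    """Parse the three-line query-grounding reply described in Appendix H.1.

    Expected lines:
        Key Objects: person, dog, red clothes
        Cue Objects: grassy_area, leash, fence
        Rel: (person; attribute; red clothes), (person; spatial; dog)
    """
    key_objects, cue_objects, relations = [], [], []
    for line in text.splitlines():
        line = line.strip()
        if line.startswith("Key Objects:"):
            key_objects = [o.strip() for o in line.split(":", 1)[1].split(",") if o.strip()]
        elif line.startswith("Cue Objects:"):
            cue_objects = [o.strip() for o in line.split(":", 1)[1].split(",") if o.strip()]
        elif line.startswith("Rel:"):
            for triplet in re.findall(r"\(([^)]*)\)", line):
                parts = [p.strip() for p in triplet.split(";")]
                if len(parts) == 3:              # (object, relation_type, object)
                    relations.append(tuple(parts))
    return key_objects, cue_objects, relations
```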
# I Limitations

Despite the promising results of our VSLS framework, we acknowledge several limitations. First, although our approach reduces the required frame sampling to just $1.4\%$, computational cost remains a consideration for extremely long videos, with a search overhead of approximately 7.8 seconds; this may present challenges for real-time or low-latency applications. Second, the performance of VSLS is bounded by the capabilities of the underlying object detector (YOLO-WORLD). Detection accuracy may degrade under challenging visual conditions such as poor lighting, occlusion, or unusual camera angles, potentially affecting temporal coverage.

# J Broader Impacts

Our Visual Semantic-Logical Search (VSLS) framework primarily offers positive societal impacts as a foundational algorithm for efficient keyframe selection in long videos.

# J.1 Positive Impacts

- Educational Applications: VSLS enables students and educators to quickly locate relevant segments in instructional videos, improving learning efficiency for visual content.
- Research Enhancement: Scientists across disciplines can benefit from more efficient analysis of video archives, particularly those studying behavioral patterns or analyzing historical footage.
- Computational Efficiency: By sampling only $1.4\%$ of frames on average, our approach reduces computational requirements and energy consumption, contributing to more sustainable AI applications.
- Accessibility: Our framework can be integrated into assistive technologies for individuals with cognitive processing challenges, helping them identify and focus on critical moments in video content.

# J.2 Potential Considerations

As a foundational algorithm, VSLS has limited direct negative impacts. However, like any computer vision technology, applications built upon it should be mindful of general considerations:

- Underlying Model Biases: The performance of VSLS depends partly on object detection systems (e.g., YOLO-World), so it inherits any limitations or biases present in these components. Our modular design allows for substitution with improved detection systems as they become available.

# NeurIPS Paper Checklist

# 1. Claims

Question: Do the main claims made in the abstract and introduction accurately reflect the paper's contributions and scope?

Answer: [Yes]

Justification: The abstract and introduction clearly state the main contributions of our work, including (1) the proposal of a semantics-driven keyframe search framework using four logical relations, (2) performance gains on multiple long video QA benchmarks, (3) efficient frame sampling $(1.4\%)$ with state-of-the-art results, and (4) plug-and-play compatibility with VLM/LLM pipelines. These claims are supported by both the method and experimental sections (see Sections "Introduction", "Method", and "Experiment"), and limitations are discussed in the main paper and Appendix I. The claims are fully aligned with the presented theoretical and empirical results.

# Guidelines:

- The answer NA means that the abstract and introduction do not include the claims made in the paper.
- The abstract and/or introduction should clearly state the claims made, including the contributions made in the paper and important assumptions and limitations. A No or NA answer to this question will not be perceived well by the reviewers.
- The claims made should match theoretical and experimental results, and reflect how much the results can be expected to generalize to other settings.
- It is fine to include aspirational goals as motivation as long as it is clear that these goals are not attained by the paper.

# 2. Limitations

Question: Does the paper discuss the limitations of the work performed by the authors?

Answer: [Yes]

Justification: The paper discusses limitations in Appendix I.

# Guidelines:

- The answer NA means that the paper has no limitation while the answer No means that the paper has limitations, but those are not discussed in the paper.
- The authors are encouraged to create a separate "Limitations" section in their paper.
- The paper should point out any strong assumptions and how robust the results are to violations of these assumptions (e.g., independence assumptions, noiseless settings, model well-specification, asymptotic approximations only holding locally). The authors should reflect on how these assumptions might be violated in practice and what the implications would be.
- The authors should reflect on the scope of the claims made, e.g., if the approach was only tested on a few datasets or with a few runs. In general, empirical results often depend on implicit assumptions, which should be articulated.
- The authors should reflect on the factors that influence the performance of the approach. For example, a facial recognition algorithm may perform poorly when image resolution is low or images are taken in low lighting. Or a speech-to-text system might not be used reliably to provide closed captions for online lectures because it fails to handle technical jargon.
- The authors should discuss the computational efficiency of the proposed algorithms and how they scale with dataset size.
- If applicable, the authors should discuss possible limitations of their approach to address problems of privacy and fairness.
- While the authors might fear that complete honesty about limitations might be used by reviewers as grounds for rejection, a worse outcome might be that reviewers discover limitations that aren't acknowledged in the paper. The authors should use their best judgment and recognize that individual actions in favor of transparency play an important role in developing norms that preserve the integrity of the community. Reviewers will be specifically instructed to not penalize honesty concerning limitations.

# 3. Theory assumptions and proofs

Question: For each theoretical result, does the paper provide the full set of assumptions and a complete (and correct) proof?

Answer: [NA]

Justification: The paper does not include formal theoretical results, theorems, or proofs. Our work is primarily methodological and experimental; all mathematical formulations are used to describe the algorithm and its components, but no formal theorems are claimed or proved. Therefore, this item is not applicable.

# Guidelines:

- The answer NA means that the paper does not include theoretical results.
- All the theorems, formulas, and proofs in the paper should be numbered and cross-referenced.
- All assumptions should be clearly stated or referenced in the statement of any theorems.
- The proofs can either appear in the main paper or the supplemental material, but if they appear in the supplemental material, the authors are encouraged to provide a short proof sketch to provide intuition.
- Inversely, any informal proof provided in the core of the paper should be complemented by formal proofs provided in appendix or supplemental material.
- Theorems and Lemmas that the proof relies upon should be properly referenced.

# 4. Experimental result reproducibility

Question: Does the paper fully disclose all the information needed to reproduce the main experimental results of the paper to the extent that it affects the main claims and/or conclusions of the paper (regardless of whether the code and data are provided or not)?

Answer: [Yes]

Justification: The paper provides comprehensive details required for reproducibility, including descriptions of all datasets used (see Section "Details of Datasets" and Appendix D), implementation details of the proposed algorithm (see "Method" and "Algorithm Overview"), hyperparameter choices, prompt templates (Appendix "Prompt"), and evaluation protocols for each experiment. We also specify the object detection models and baselines used, and state that the code will be publicly released. This level of detail allows other researchers to replicate the main experiments and validate our claims.

# Guidelines:

- The answer NA means that the paper does not include experiments.
- If the paper includes experiments, a No answer to this question will not be perceived well by the reviewers: Making the paper reproducible is important, regardless of whether the code and data are provided or not.
- If the contribution is a dataset and/or model, the authors should describe the steps taken to make their results reproducible or verifiable.
- Depending on the contribution, reproducibility can be accomplished in various ways. For example, if the contribution is a novel architecture, describing the architecture fully might suffice, or if the contribution is a specific model and empirical evaluation, it may be necessary to either make it possible for others to replicate the model with the same dataset, or provide access to the model. In general, releasing code and data is often one good way to accomplish this, but reproducibility can also be provided via detailed instructions for how to replicate the results, access to a hosted model (e.g., in the case of a large language model), releasing of a model checkpoint, or other means that are appropriate to the research performed.
- While NeurIPS does not require releasing code, the conference does require all submissions to provide some reasonable avenue for reproducibility, which may depend on the nature of the contribution. For example:
(a) If the contribution is primarily a new algorithm, the paper should make it clear how to reproduce that algorithm.
(b) If the contribution is primarily a new model architecture, the paper should describe the architecture clearly and fully.
(c) If the contribution is a new model (e.g., a large language model), then there should either be a way to access this model for reproducing the results or a way to reproduce the model (e.g., with an open-source dataset or instructions for how to construct the dataset).
(d) We recognize that reproducibility may be tricky in some cases, in which case authors are welcome to describe the particular way they provide for reproducibility. In the case of closed-source models, it may be that access to the model is limited in some way (e.g., to registered users), but it should be possible for other researchers to have some path to reproducing or verifying the results.

# 5. Open access to data and code
Question: Does the paper provide open access to the data and code, with sufficient instructions to faithfully reproduce the main experimental results, as described in supplemental material?

Answer: [Yes]

Justification: We state in the abstract and main text that the code will be publicly released. All datasets used in our experiments are from public benchmarks (LONGVIDEOBENCH, VIDEO-MME, HAYSTACK-LVBENCH, EGO4D), and details for data access are provided in Appendix D. Instructions for running our framework, data preparation, and experiment replication will be included in the released code repository. Thus, researchers will be able to access both code and data with clear instructions for full reproducibility.

# Guidelines:

- The answer NA means that paper does not include experiments requiring code.
- Please see the NeurIPS code and data submission guidelines (https://nips.cc/public/guides/CodeSubmissionPolicy) for more details.
- While we encourage the release of code and data, we understand that this might not be possible, so "No" is an acceptable answer. Papers cannot be rejected simply for not including code, unless this is central to the contribution (e.g., for a new open-source benchmark).
- The instructions should contain the exact command and environment needed to run to reproduce the results. See the NeurIPS code and data submission guidelines (https://nips.cc/public/guides/CodeSubmissionPolicy) for more details.
- The authors should provide instructions on data access and preparation, including how to access the raw data, preprocessed data, intermediate data, and generated data, etc.
- The authors should provide scripts to reproduce all experimental results for the new proposed method and baselines. If only a subset of experiments are reproducible, they should state which ones are omitted from the script and why.
- At submission time, to preserve anonymity, the authors should release anonymized versions (if applicable).
- Providing as much information as possible in supplemental material (appended to the paper) is recommended, but including URLs to data and code is permitted.

# 6. Experimental setting/details

Question: Does the paper specify all the training and test details (e.g., data splits, hyperparameters, how they were chosen, type of optimizer, etc.) necessary to understand the results?

Answer: [Yes]

Justification: The paper specifies all relevant experimental details, including descriptions of dataset splits, hyperparameters, evaluation metrics, and prompt templates (see "Experiment," Table captions, and Appendix D). As our method is training-free, we clarify in the main text which components rely on pre-trained models and explicitly describe all parameter settings for reproducibility. This ensures that readers can fully understand and interpret the reported results.

# Guidelines:

- The answer NA means that the paper does not include experiments.
- The experimental setting should be presented in the core of the paper to a level of detail that is necessary to appreciate the results and make sense of them.
- The full details can be provided either with the code, in appendix, or as supplemental material.

# 7. Experiment statistical significance

Question: Does the paper report error bars suitably and correctly defined or other appropriate information about the statistical significance of the experiments?
+ +# Answer: [No] + +Justification: The paper does not report error bars or formal statistical significance tests for the main experimental results, as our approach is deterministic and uses fixed dataset splits and pre-trained models. Metrics are reported as single values following common practice in recent long video QA benchmarks. While this is standard in the area, we acknowledge that including error bars or additional significance analysis would further strengthen the experimental evaluation. + +# Guidelines: + +- The answer NA means that the paper does not include experiments. +- The authors should answer "Yes" if the results are accompanied by error bars, confidence intervals, or statistical significance tests, at least for the experiments that support the main claims of the paper. +- The factors of variability that the error bars are capturing should be clearly stated (for example, train/test split, initialization, random drawing of some parameter, or overall run with given experimental conditions). +- The method for calculating the error bars should be explained (closed form formula, call to a library function, bootstrap, etc.) +- The assumptions made should be given (e.g., Normally distributed errors). +- It should be clear whether the error bar is the standard deviation or the standard error of the mean. +- It is OK to report 1-sigma error bars, but one should state it. The authors should preferably report a 2-sigma error bar than state that they have a $96\%$ CI, if the hypothesis of Normality of errors is not verified. +- For asymmetric distributions, the authors should be careful not to show in tables or figures symmetric error bars that would yield results that are out of range (e.g. negative error rates). +- If error bars are reported in tables or plots, The authors should explain in the text how they were calculated and reference the corresponding figures or tables in the text. + +# 8. Experiments compute resources + +Question: For each experiment, does the paper provide sufficient information on the computer resources (type of compute workers, memory, time of execution) needed to reproduce the experiments? + +# Answer: [Yes] + +Justification: The paper specifies the computing environment in Appendix E.6, and reports both latency and FLOPs for major baselines and our method in Table 1. We also provide the number of iterations, average processing time, and model sizes in the main text and tables. This information is sufficient for others to estimate compute requirements and reproduce the experiments. + +# Guidelines: + +- The answer NA means that the paper does not include experiments. +- The paper should indicate the type of compute workers CPU or GPU, internal cluster, or cloud provider, including relevant memory and storage. +- The paper should provide the amount of compute required for each of the individual experimental runs as well as estimate the total compute. +- The paper should disclose whether the full research project required more compute than the experiments reported in the paper (e.g., preliminary or failed experiments that didn't make it into the paper). + +# 9. Code of ethics + +Question: Does the research conducted in the paper conform, in every respect, with the NeurIPS Code of Ethics https://neurips.cc/public/EthicsGuidelines? + +Answer: [Yes] + +Justification: The research follows the NeurIPS Code of Ethics. All datasets used are publicly available, appropriately licensed, and include human annotation with proper privacy safeguards (see Appendix D). 
No personally identifiable information or sensitive data is used. The proposed methods and experiments present no foreseeable risk of harm, discrimination, or privacy violation. Anonymity is preserved in all supplementary materials.

# Guidelines:

- The answer NA means that the authors have not reviewed the NeurIPS Code of Ethics.
- If the authors answer No, they should explain the special circumstances that require a deviation from the Code of Ethics.
- The authors should make sure to preserve anonymity (e.g., if there is a special consideration due to laws or regulations in their jurisdiction).

# 10. Broader impacts

Question: Does the paper discuss both potential positive societal impacts and negative societal impacts of the work performed?

Answer: [Yes]

Justification: Our paper discusses broader impacts in Appendix J.

# Guidelines:

- The answer NA means that there is no societal impact of the work performed.
- If the authors answer NA or No, they should explain why their work has no societal impact or why the paper does not address societal impact.
- Examples of negative societal impacts include potential malicious or unintended uses (e.g., disinformation, generating fake profiles, surveillance), fairness considerations (e.g., deployment of technologies that could make decisions that unfairly impact specific groups), privacy considerations, and security considerations.
- The conference expects that many papers will be foundational research and not tied to particular applications, let alone deployments. However, if there is a direct path to any negative applications, the authors should point it out. For example, it is legitimate to point out that an improvement in the quality of generative models could be used to generate deepfakes for disinformation. On the other hand, it is not needed to point out that a generic algorithm for optimizing neural networks could enable people to train models that generate Deepfakes faster.
- The authors should consider possible harms that could arise when the technology is being used as intended and functioning correctly, harms that could arise when the technology is being used as intended but gives incorrect results, and harms following from (intentional or unintentional) misuse of the technology.
- If there are negative societal impacts, the authors could also discuss possible mitigation strategies (e.g., gated release of models, providing defenses in addition to attacks, mechanisms for monitoring misuse, mechanisms to monitor how a system learns from feedback over time, improving the efficiency and accessibility of ML).

# 11. Safeguards

Question: Does the paper describe safeguards that have been put in place for responsible release of data or models that have a high risk for misuse (e.g., pretrained language models, image generators, or scraped datasets)?

Answer: [NA]

Justification: Our work introduces a semantic-logical search framework for keyframe selection that builds upon existing object detection models and benchmarks. It does not release new datasets scraped from the internet or high-risk generative models. While our method improves video understanding capabilities, it doesn't introduce fundamentally new capabilities that would require specific safeguards beyond those already in place for the underlying technologies (such as YOLO-World) that we utilize.

# Guidelines:

- The answer NA means that the paper poses no such risks.
+ +- Released models that have a high risk for misuse or dual-use should be released with necessary safeguards to allow for controlled use of the model, for example by requiring that users adhere to usage guidelines or restrictions to access the model or implementing safety filters. +- Datasets that have been scraped from the Internet could pose safety risks. The authors should describe how they avoided releasing unsafe images. +- We recognize that providing effective safeguards is challenging, and many papers do not require this, but we encourage authors to take this into account and make a best faith effort. + +# 12. Licenses for existing assets + +Question: Are the creators or original owners of assets (e.g., code, data, models), used in the paper, properly credited and are the license and terms of use explicitly mentioned and properly respected? + +Answer: [Yes] + +Justification: All existing assets used in this work, including the YOLO-World detector and the LONGVIDEOBENCH, VIDEO-MME, and HAYSTACK-LVBENCH benchmarks, are publicly available and appropriately licensed, and we credit them by citing their original papers throughout the paper. + +# Guidelines: + +- The answer NA means that the paper does not use existing assets. +- The authors should cite the original paper that produced the code package or dataset. +- The authors should state which version of the asset is used and, if possible, include a URL. +- The name of the license (e.g., CC-BY 4.0) should be included for each asset. +- For scraped data from a particular source (e.g., website), the copyright and terms of service of that source should be provided. +- If assets are released, the license, copyright information, and terms of use in the package should be provided. For popular datasets, paperswithcode.com/datasets has curated licenses for some datasets. Their licensing guide can help determine the license of a dataset. +- For existing datasets that are re-packaged, both the original license and the license of the derived asset (if it has changed) should be provided. +- If this information is not available online, the authors are encouraged to reach out to the asset's creators. + +# 13. New assets + +Question: Are new assets introduced in the paper well documented and is the documentation provided alongside the assets? + +Answer: [Yes] + +Justification: We will release code for our VSLS framework upon publication, as mentioned in the abstract. The code will be accompanied by comprehensive documentation detailing the implementation of our four logical dependencies (spatial, temporal, attribute, and causal), the iterative refinement process, and instructions for reproducing our experimental results. Our paper does not introduce new datasets but rather evaluates our method on existing benchmarks including LONGVIDEOBENCH, VIDEO-MME, and HAYSTACK-LVBENCH, which are properly cited throughout the paper. + +# Guidelines: + +- The answer NA means that the paper does not release new assets. +- Researchers should communicate the details of the dataset/code/model as part of their submissions via structured templates. This includes details about training, license, limitations, etc. +- The paper should discuss whether and how consent was obtained from people whose asset is used. +- At submission time, remember to anonymize your assets (if applicable). You can either create an anonymized URL or include an anonymized zip file.
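As a concrete companion to the iterative refinement process referenced in item 13 above, the loop can be summarized as: start from a uniform (query-agnostic) distribution over frame indices, sample a frame, score it by object detection plus logic verification, and sharpen the distribution around high-confidence hits before sampling again. Below is a minimal, illustrative sketch of such a loop; the Gaussian reweighting rule, all parameter values, and the `score_frame` stub are assumptions made for illustration, not the released VSLS implementation (which detects objects with YOLO-World and fuses scores with a spline function).

```python
import numpy as np

def search_keyframes(num_frames, score_frame, budget=48, num_keyframes=8,
                     bandwidth=30.0, seed=0):
    """Toy iterative keyframe search over a distribution on frame indices.

    score_frame(t) -> confidence in [0, 1] that frame t contains the query's
    target/cue objects in the required logical relation (a stand-in for
    open-vocabulary detection plus logic verification).
    """
    rng = np.random.default_rng(seed)
    idx = np.arange(num_frames)
    p = np.full(num_frames, 1.0 / num_frames)  # uniform, query-agnostic start
    unseen = np.ones(num_frames, dtype=bool)
    scores = {}
    for _ in range(budget):
        q = p * unseen                       # never re-score a sampled frame
        t = int(rng.choice(idx, p=q / q.sum()))
        unseen[t] = False
        scores[t] = score_frame(t)
        # Sharpen the sampling distribution around high-confidence hits with
        # a Gaussian bump (an assumed, purely illustrative update rule).
        p = p + scores[t] * np.exp(-0.5 * ((idx - t) / bandwidth) ** 2)
        p = p / p.sum()
    # Return the highest-confidence frames in temporal order.
    return sorted(sorted(scores, key=scores.get, reverse=True)[:num_keyframes])

# Example: suppose the query-relevant event spans frames 4000-4200 of a
# 10,000-frame video; the sampled indices quickly cluster around that span.
print(search_keyframes(10_000, lambda t: float(4000 <= t <= 4200)))
```

Because each iteration only scores the frames it actually samples, a loop of this shape inspects a small fraction of the video (the paper reports about 1.4% of frames on average) rather than densely captioning every frame.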
+ +# 14. Crowdsourcing and research with human subjects

Question: For crowdsourcing experiments and research with human subjects, does the paper include the full text of instructions given to participants and screenshots, if applicable, as well as details about compensation (if any)?

Answer: [NA]

Justification: Our research does not involve crowdsourcing or human subject experiments. We evaluate our method using existing benchmarks (LONGVIDEOBENCH, VIDEO-MME, HAYSTACK-LVBENCH) that contain human-annotated ground truth data, but we did not collect new human annotations or conduct human evaluations as part of our work. Our methodology is purely algorithmic, focusing on the semantic-logical frameworks for keyframe selection and evaluation through computational metrics.

# Guidelines:

- The answer NA means that the paper does not involve crowdsourcing nor research with human subjects. +- Including this information in the supplemental material is fine, but if the main contribution of the paper involves human subjects, then as much detail as possible should be included in the main paper. +- According to the NeurIPS Code of Ethics, workers involved in data collection, curation, or other labor should be paid at least the minimum wage in the country of the data collector. + +# 15. Institutional review board (IRB) approvals or equivalent for research with human subjects

Question: Does the paper describe potential risks incurred by study participants, whether such risks were disclosed to the subjects, and whether Institutional Review Board (IRB) approvals (or an equivalent approval/review based on the requirements of your country or institution) were obtained?

Answer: [NA]

Justification: Our research does not involve human subjects. We utilize existing benchmark datasets (LONGVIDEOBENCH, VIDEO-MME, HAYSTACK-LVBENCH) without collecting new data from human participants. Our work focuses on developing and evaluating algorithmic approaches for keyframe selection based on semantic-logical relationships, which do not require IRB approval or equivalent ethical review processes.

# Guidelines:

- The answer NA means that the paper does not involve crowdsourcing nor research with human subjects. +- Depending on the country in which research is conducted, IRB approval (or equivalent) may be required for any human subjects research. If you obtained IRB approval, you should clearly state this in the paper. +- We recognize that the procedures for this may vary significantly between institutions and locations, and we expect authors to adhere to the NeurIPS Code of Ethics and the guidelines for their institution. +- For initial submissions, do not include any information that would break anonymity (if applicable), such as the institution conducting the review. + +# 16. Declaration of LLM usage

Question: Does the paper describe the usage of LLMs if it is an important, original, or non-standard component of the core methods in this research? Note that if the LLM is used only for writing, editing, or formatting purposes and does not impact the core methodology, scientific rigorousness, or originality of the research, declaration is not required.

Answer: [Yes]

Justification: Our Visual Semantic-Logical Search framework uses LLMs (specifically mentioned in Section 3.2 and Figure 2) as part of our query decomposition process. We employ models such as LLAVA-7B and GPT-4o to extract semantic information from textual queries, including key objects, cue objects, and their logical relationships. This LLM-based decomposition is an integral component of our method, as it enables the identification of the four logical relation types (spatial, temporal, attribute, and causal) that guide our keyframe selection process. The prompt template for this query grounding is provided in Appendix H.

# Guidelines:

- The answer NA means that the core method development in this research does not involve LLMs as any important, original, or non-standard components.

- Please refer to our LLM policy (https://neurips.cc/Conferences/2025/LLM) for what should or should not be described. \ No newline at end of file
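The LLM-based query decomposition declared in item 16 can likewise be sketched in a few lines: prompt an LLM to emit key objects, cue objects, and (object1, relation, object2) triplets restricted to the four relation types, then parse and validate the output. The prompt wording, the function names, and the stubbed `fake_llm` below are invented for this sketch; the actual template used by the method is the one in Appendix H.

```python
import json
from typing import Callable

RELATIONS = ("spatial", "temporal", "attribute", "causal")

# Hypothetical prompt for illustration; the method's real template is in Appendix H.
PROMPT = (
    "Decompose the question into search targets for a long video.\n"
    'Return JSON: {"key_objects": [...], "cue_objects": [...],\n'
    '"triplets": [["object1", "relation", "object2"], ...]},\n'
    "where relation is one of: spatial, temporal, attribute, causal.\n"
    "Question: {question}"
)

def decompose_query(question: str, llm: Callable[[str], str]) -> dict:
    """Extract key/cue objects and logical triplets from a textual query."""
    out = json.loads(llm(PROMPT.replace("{question}", question)))
    # Keep only well-formed triplets with a recognized relation type.
    out["triplets"] = [t for t in out.get("triplets", [])
                       if len(t) == 3 and t[1] in RELATIONS]
    return out

# Stubbed LLM so the sketch runs offline; a real call would go to the
# LLAVA-7B / GPT-4o models named in the checklist answer above.
def fake_llm(_prompt: str) -> str:
    return json.dumps({"key_objects": ["pen"], "cue_objects": ["text"],
                       "triplets": [["text", "temporal", "pen"]]})

print(decompose_query('What color pen did the author use when he wrote '
                      '"guitar" for the second time?', fake_llm))
```

The example query and the (text, temporal, pen) triplet mirror the temporal example shown in Figure 1 of the paper.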
diff --git a/data/2025/2503_13xxx/2503.13139/images/0ac1d21efd0cb56e7487c4fd721935d9ea73ceb841af753fd12a4061a0102ba1.jpg b/data/2025/2503_13xxx/2503.13139/images/0ac1d21efd0cb56e7487c4fd721935d9ea73ceb841af753fd12a4061a0102ba1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..319bf2acd612441b49e80d1ba2ad21dc0cc656aa --- /dev/null +++ b/data/2025/2503_13xxx/2503.13139/images/0ac1d21efd0cb56e7487c4fd721935d9ea73ceb841af753fd12a4061a0102ba1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:67b2b04f23092bf7c1a8a9fea27cdc1b8ab83a832ad14f114680081999043a31 +size 5953 diff --git a/data/2025/2503_13xxx/2503.13139/images/0d565d9512a082fd672adf12ff6042cc2378eeb821c3f9581e7b4235f2cc1bcb.jpg b/data/2025/2503_13xxx/2503.13139/images/0d565d9512a082fd672adf12ff6042cc2378eeb821c3f9581e7b4235f2cc1bcb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..37244921b979b78cc8dbca95e641804948ef04fd --- /dev/null +++ b/data/2025/2503_13xxx/2503.13139/images/0d565d9512a082fd672adf12ff6042cc2378eeb821c3f9581e7b4235f2cc1bcb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:64f3e63f0cf2b821553ca79062ba261e6f2025b95c947e2c937d7a263ff24414 +size 3526 diff --git a/data/2025/2503_13xxx/2503.13139/images/1c05698f5ebb877c71c8dc80fe7f27060313591cbce3eed88b06bbac45cee230.jpg b/data/2025/2503_13xxx/2503.13139/images/1c05698f5ebb877c71c8dc80fe7f27060313591cbce3eed88b06bbac45cee230.jpg new file mode 100644 index 0000000000000000000000000000000000000000..530c0ce1e970598059627c08dc012ca8b5e50a9 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13139/images/1c05698f5ebb877c71c8dc80fe7f27060313591cbce3eed88b06bbac45cee230.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2262f1c2ecea6278a5f3a8553bba4fd3f8999308d7fff260221f8fd3b76bb854 +size 37197 diff --git a/data/2025/2503_13xxx/2503.13139/images/2cbbe9459cd07a11b8b6cf63240b6eb53b964cbf2cfd943bf25bbe734517e78c.jpg b/data/2025/2503_13xxx/2503.13139/images/2cbbe9459cd07a11b8b6cf63240b6eb53b964cbf2cfd943bf25bbe734517e78c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..55dafc7eecabc5fa118b922a522d28af1144c120 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13139/images/2cbbe9459cd07a11b8b6cf63240b6eb53b964cbf2cfd943bf25bbe734517e78c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:56620eca273f091a0e5610c6dfdd99f75be046c33f761e89ff744d058f45fda0 +size 14048 diff --git a/data/2025/2503_13xxx/2503.13139/images/3269a0ba4ba415d7370c101f6a7dce166285be0b34ece8282452072c7694fb9a.jpg b/data/2025/2503_13xxx/2503.13139/images/3269a0ba4ba415d7370c101f6a7dce166285be0b34ece8282452072c7694fb9a.jpg new file mode 100644 index
0000000000000000000000000000000000000000..f42fe6435204c3ba02552585886d69755dd417da --- /dev/null +++ b/data/2025/2503_13xxx/2503.13139/images/3269a0ba4ba415d7370c101f6a7dce166285be0b34ece8282452072c7694fb9a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:633d9edab2389801151428a3a5c0844d855b10fffda500bd4e8244f07fba21d1 +size 86131 diff --git a/data/2025/2503_13xxx/2503.13139/images/38acc18505c76dd154f63dcc30a60695263952fd5e1c996c5dbaf6981479fc57.jpg b/data/2025/2503_13xxx/2503.13139/images/38acc18505c76dd154f63dcc30a60695263952fd5e1c996c5dbaf6981479fc57.jpg new file mode 100644 index 0000000000000000000000000000000000000000..798c415069b96eb101e40e54c5f4f2f10a04a724 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13139/images/38acc18505c76dd154f63dcc30a60695263952fd5e1c996c5dbaf6981479fc57.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c5d66d54fb791e3107500bfeb757a9c47943af1a77d1245802022f275839f69a +size 12687 diff --git a/data/2025/2503_13xxx/2503.13139/images/4a28afb109fe09584067f27ec21de26be56af1ca14d3475e8f6132495f5cdf7a.jpg b/data/2025/2503_13xxx/2503.13139/images/4a28afb109fe09584067f27ec21de26be56af1ca14d3475e8f6132495f5cdf7a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ea1fb4ef96247be199f33aa2e7c2303bee0453c8 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13139/images/4a28afb109fe09584067f27ec21de26be56af1ca14d3475e8f6132495f5cdf7a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5f2f3f576bec9e5074fdf0be8dfe7e3649ab237bde4a49674dcf04d7f2aa0f56 +size 8168 diff --git a/data/2025/2503_13xxx/2503.13139/images/511325680ed4273ed05a5920b32ea94b3a9cbc28b135c165cbc68b931bd57695.jpg b/data/2025/2503_13xxx/2503.13139/images/511325680ed4273ed05a5920b32ea94b3a9cbc28b135c165cbc68b931bd57695.jpg new file mode 100644 index 0000000000000000000000000000000000000000..248b73ed56885bdc4d6d5b86ea92726c78d5e960 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13139/images/511325680ed4273ed05a5920b32ea94b3a9cbc28b135c165cbc68b931bd57695.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:764f918839a655b39b2e716e2d0dc63c5e168a08f7edcff9d268a35f30eeacf9 +size 5297 diff --git a/data/2025/2503_13xxx/2503.13139/images/5408beda2a39e51f3d8b63e6913b6b998eaf0668924dec183510cf1a30f61ce8.jpg b/data/2025/2503_13xxx/2503.13139/images/5408beda2a39e51f3d8b63e6913b6b998eaf0668924dec183510cf1a30f61ce8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9f8d15019a397b707ebe88e1dec4f45eb56aab2b --- /dev/null +++ b/data/2025/2503_13xxx/2503.13139/images/5408beda2a39e51f3d8b63e6913b6b998eaf0668924dec183510cf1a30f61ce8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4f05c127d4265ebb4195328e8f495e71ce262a6ad45d6ceb97d541530a832a5c +size 3510 diff --git a/data/2025/2503_13xxx/2503.13139/images/5ac759c399e4241d5fdbfd16a0f85ca6b782a2f2813aa2bbd1cc48e98d36fdae.jpg b/data/2025/2503_13xxx/2503.13139/images/5ac759c399e4241d5fdbfd16a0f85ca6b782a2f2813aa2bbd1cc48e98d36fdae.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8fda149f32ba1ebac1b0af893bbb345ea031dcce --- /dev/null +++ b/data/2025/2503_13xxx/2503.13139/images/5ac759c399e4241d5fdbfd16a0f85ca6b782a2f2813aa2bbd1cc48e98d36fdae.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ac0bffa27eb071f7719182e381de22d7850c1b730924db2ca02327b4e5d9e28b +size 3980 diff --git 
a/data/2025/2503_13xxx/2503.13139/images/676bbf3304df1cb8bf6e1d45a0d6fb99a711d39f00a7dac0b26af20143b03b71.jpg b/data/2025/2503_13xxx/2503.13139/images/676bbf3304df1cb8bf6e1d45a0d6fb99a711d39f00a7dac0b26af20143b03b71.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f0195236540bd186e80be895b557ba123439ae51 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13139/images/676bbf3304df1cb8bf6e1d45a0d6fb99a711d39f00a7dac0b26af20143b03b71.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f66d9719aed2512f5779ae37f98bb1d54c28e6b7484d65395ff6050a663c0469 +size 5554 diff --git a/data/2025/2503_13xxx/2503.13139/images/695a4ea89f1390a6314d0efb1c2f03296b29ca46622c4f759c167008eba24d34.jpg b/data/2025/2503_13xxx/2503.13139/images/695a4ea89f1390a6314d0efb1c2f03296b29ca46622c4f759c167008eba24d34.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a232d8b30358ca5dec1f5b12bb1836593a0b1aa2 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13139/images/695a4ea89f1390a6314d0efb1c2f03296b29ca46622c4f759c167008eba24d34.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f6aca32733f8ea75b9766b656be1f4b651c1243c6013097a9b373f9a1741751a +size 8214 diff --git a/data/2025/2503_13xxx/2503.13139/images/70069406c3ee25f7805367e06c6d48170b97455f4be6514bb40965b70097b46f.jpg b/data/2025/2503_13xxx/2503.13139/images/70069406c3ee25f7805367e06c6d48170b97455f4be6514bb40965b70097b46f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5446ecf69644818c86899879308c040f4028fb67 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13139/images/70069406c3ee25f7805367e06c6d48170b97455f4be6514bb40965b70097b46f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8a31138eba645c53f8cc0f7175769b4e702f63d8ceeefc90e7999036423b55d1 +size 16424 diff --git a/data/2025/2503_13xxx/2503.13139/images/77ffdb555eea1232cf87a622c67c7e006cf68311bef63dd1fbc645e13a499a9d.jpg b/data/2025/2503_13xxx/2503.13139/images/77ffdb555eea1232cf87a622c67c7e006cf68311bef63dd1fbc645e13a499a9d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..39b6b88f56319b88b2afa62b14c6cfc844dac310 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13139/images/77ffdb555eea1232cf87a622c67c7e006cf68311bef63dd1fbc645e13a499a9d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4e01000daccb296ea5cfd864ad067ebb8d5e8bb3da885d6f3e0fd16e6e1b879e +size 9472 diff --git a/data/2025/2503_13xxx/2503.13139/images/7e6ec89ef236cb486a46fb294d0375b8275a409cdda4409bec158388efd8cfb3.jpg b/data/2025/2503_13xxx/2503.13139/images/7e6ec89ef236cb486a46fb294d0375b8275a409cdda4409bec158388efd8cfb3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e6b7e7ba9d33f17e090358d273c06afa5e3f8e6c --- /dev/null +++ b/data/2025/2503_13xxx/2503.13139/images/7e6ec89ef236cb486a46fb294d0375b8275a409cdda4409bec158388efd8cfb3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:992e15d8a1e4b07b466c430e242d617ad3cb32f348dbe2068422530ad4438e23 +size 4342 diff --git a/data/2025/2503_13xxx/2503.13139/images/9418dbcf790187e3dd3a1499947366d89992e9107a4f5b6b4e378bd6721a2cc2.jpg b/data/2025/2503_13xxx/2503.13139/images/9418dbcf790187e3dd3a1499947366d89992e9107a4f5b6b4e378bd6721a2cc2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f7f3513853af8ed5ee3bb78aaaa518bd4a5b7cc6 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13139/images/9418dbcf790187e3dd3a1499947366d89992e9107a4f5b6b4e378bd6721a2cc2.jpg @@ -0,0 
+1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:31abdb9bd069c21e871c5c7e2130d67d0e9a219f43c1ebc82be797bade04140b +size 37544 diff --git a/data/2025/2503_13xxx/2503.13139/images/94231a5d2a0704895637946eabe85545b336071375abcfbbf71d4801fdb3ce38.jpg b/data/2025/2503_13xxx/2503.13139/images/94231a5d2a0704895637946eabe85545b336071375abcfbbf71d4801fdb3ce38.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b883b0b576806daa052580daa613d230a608e2a6 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13139/images/94231a5d2a0704895637946eabe85545b336071375abcfbbf71d4801fdb3ce38.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cf1ad4d52508d4f4826189787abcc589684527e127cee1731686a1f835d8175d +size 9294 diff --git a/data/2025/2503_13xxx/2503.13139/images/94d01838d210618c918d4b0d5acc496b6d56923e612c4a95d392bb84b400ab9a.jpg b/data/2025/2503_13xxx/2503.13139/images/94d01838d210618c918d4b0d5acc496b6d56923e612c4a95d392bb84b400ab9a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..608640a3322f1479e497641dd21c1c6f61b0e7da --- /dev/null +++ b/data/2025/2503_13xxx/2503.13139/images/94d01838d210618c918d4b0d5acc496b6d56923e612c4a95d392bb84b400ab9a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b95a662022c40802061b6546253c5b0be7a4a8a814941f6a55dcb8bbf4785114 +size 36368 diff --git a/data/2025/2503_13xxx/2503.13139/images/953c9d83f6870c26dad9968e0c39a6e9242291e0ea0bf567b0874126d0a19571.jpg b/data/2025/2503_13xxx/2503.13139/images/953c9d83f6870c26dad9968e0c39a6e9242291e0ea0bf567b0874126d0a19571.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0c805717eda9477e905681d0be7c081fa60efc10 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13139/images/953c9d83f6870c26dad9968e0c39a6e9242291e0ea0bf567b0874126d0a19571.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3d6ef475929ae57ea3ddd8f5dfde37fdd0433c78cbfdbc0207b49dde233590e7 +size 5915 diff --git a/data/2025/2503_13xxx/2503.13139/images/97000cb287c16fdfdb7eb5b57ac87b488b53bdf10f7712a12a7a1d22a55787d8.jpg b/data/2025/2503_13xxx/2503.13139/images/97000cb287c16fdfdb7eb5b57ac87b488b53bdf10f7712a12a7a1d22a55787d8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6719ccc6b81b8bd14ad01eee734ce7626111f404 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13139/images/97000cb287c16fdfdb7eb5b57ac87b488b53bdf10f7712a12a7a1d22a55787d8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c3a16b888e9839ae168fb0f2be32b541ccb61e7073d855249436b5f53df9f129 +size 54107 diff --git a/data/2025/2503_13xxx/2503.13139/images/a0ece509b574cfba939c9f0b341a7663e61092f677c80d6f88407e3aa6baeb1d.jpg b/data/2025/2503_13xxx/2503.13139/images/a0ece509b574cfba939c9f0b341a7663e61092f677c80d6f88407e3aa6baeb1d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f4f7b5a70b510635efc0d688182233cb937b8cf9 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13139/images/a0ece509b574cfba939c9f0b341a7663e61092f677c80d6f88407e3aa6baeb1d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a2ce7754a58d4db70dd656800db743c9b9b55b7b2df9074b363197e65916bace +size 5937 diff --git a/data/2025/2503_13xxx/2503.13139/images/a12bb641ca82972e0549859e62e742682b2319ef656a04a9604927b07e64f4ec.jpg b/data/2025/2503_13xxx/2503.13139/images/a12bb641ca82972e0549859e62e742682b2319ef656a04a9604927b07e64f4ec.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..18fc23489ac7f8432af165c012c2b90b5180b0e1 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13139/images/a12bb641ca82972e0549859e62e742682b2319ef656a04a9604927b07e64f4ec.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:37499ee0796a6f82ca79ef6973a6035732f6cfe5fbd156adaedbbb3f9117ee5e +size 18891 diff --git a/data/2025/2503_13xxx/2503.13139/images/a968b2ea382466d089a06a218564465d838639afd95d1fa4a4ea5ed058508ec0.jpg b/data/2025/2503_13xxx/2503.13139/images/a968b2ea382466d089a06a218564465d838639afd95d1fa4a4ea5ed058508ec0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c82dfc6d5e0ed8c174373a90153d6ae180ca1f5f --- /dev/null +++ b/data/2025/2503_13xxx/2503.13139/images/a968b2ea382466d089a06a218564465d838639afd95d1fa4a4ea5ed058508ec0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:68809b2ae02e780beef7ef5442589d78041d14b3bc0d197e62d4348a460b3a02 +size 5273 diff --git a/data/2025/2503_13xxx/2503.13139/images/ac70dfed2f93f2e57ec2e765bb99ce5996cc84efe94f743f0640b0f5b681d414.jpg b/data/2025/2503_13xxx/2503.13139/images/ac70dfed2f93f2e57ec2e765bb99ce5996cc84efe94f743f0640b0f5b681d414.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8e092899dcaa73018fb2d86aee696de940d66b77 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13139/images/ac70dfed2f93f2e57ec2e765bb99ce5996cc84efe94f743f0640b0f5b681d414.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bf774063113b75b12e424d4188adad7cfaafbefefde60eafbe393a2861543176 +size 6027 diff --git a/data/2025/2503_13xxx/2503.13139/images/b122957dd055813357d936dff056404604c37dce8079ebfdf60a5643e31f325f.jpg b/data/2025/2503_13xxx/2503.13139/images/b122957dd055813357d936dff056404604c37dce8079ebfdf60a5643e31f325f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..66c5ad7943927debe2651b7aef5f5a11dbab2a84 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13139/images/b122957dd055813357d936dff056404604c37dce8079ebfdf60a5643e31f325f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4bc7b0e41a4dc9ebdebe985099bd4647bd0a82a154b7e608cfd856df2f9d6e7a +size 94739 diff --git a/data/2025/2503_13xxx/2503.13139/images/b6b19c0e6bd22751c5f1e0df89f4485608192d639321a8a37dddaf8a1c2ea6bb.jpg b/data/2025/2503_13xxx/2503.13139/images/b6b19c0e6bd22751c5f1e0df89f4485608192d639321a8a37dddaf8a1c2ea6bb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fce37d9c366467b32b7e0aa5b0fb340f629665b5 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13139/images/b6b19c0e6bd22751c5f1e0df89f4485608192d639321a8a37dddaf8a1c2ea6bb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:16762a39639edb176a56db5059bd8498607ffb376e78ae3f3b2343cd55200b10 +size 7435 diff --git a/data/2025/2503_13xxx/2503.13139/images/b7caaad2a9fefb25b612157ad0bb1360615e5b13b8e9d761838013ee4af882fd.jpg b/data/2025/2503_13xxx/2503.13139/images/b7caaad2a9fefb25b612157ad0bb1360615e5b13b8e9d761838013ee4af882fd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..80ed13e10e435d2b626d468d2714bf5f9bc2987e --- /dev/null +++ b/data/2025/2503_13xxx/2503.13139/images/b7caaad2a9fefb25b612157ad0bb1360615e5b13b8e9d761838013ee4af882fd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7a582d6dabae31a0e0cf11c14a134ea7d2a1ab9b54bd0fe2e9c4832ce7e1e6c8 +size 16786 diff --git 
a/data/2025/2503_13xxx/2503.13139/images/befbb97408628da460b2c29aea40fd3443321e17d35cabd22a3efcca679f78c4.jpg b/data/2025/2503_13xxx/2503.13139/images/befbb97408628da460b2c29aea40fd3443321e17d35cabd22a3efcca679f78c4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..457dfa4b50d2c86332688ad71548d59349e0f93e --- /dev/null +++ b/data/2025/2503_13xxx/2503.13139/images/befbb97408628da460b2c29aea40fd3443321e17d35cabd22a3efcca679f78c4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5b119b4b555a0b93ddac80ed7a6aedbecf99ea164a5ffd07885bebc00cfbfbf1 +size 5186 diff --git a/data/2025/2503_13xxx/2503.13139/images/c2bb1f52e9e19170d0c025e96d90330f57871bc152ba1bbb27307286e8aafd7e.jpg b/data/2025/2503_13xxx/2503.13139/images/c2bb1f52e9e19170d0c025e96d90330f57871bc152ba1bbb27307286e8aafd7e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c813b890d486a8bb40415b32363d6d5bcbc0af65 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13139/images/c2bb1f52e9e19170d0c025e96d90330f57871bc152ba1bbb27307286e8aafd7e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c6b2c1325ee8872a2ac40a34e7e7aeaf20f8d32b966eaad4572854d813c6e5ae +size 8686 diff --git a/data/2025/2503_13xxx/2503.13139/images/c3f0aa82c5f958d8abd4d9ad4624a66f9da286d41b402ba7581849377bde1843.jpg b/data/2025/2503_13xxx/2503.13139/images/c3f0aa82c5f958d8abd4d9ad4624a66f9da286d41b402ba7581849377bde1843.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a8ba4e34576d4d103a96ed83dcafba1cbd215121 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13139/images/c3f0aa82c5f958d8abd4d9ad4624a66f9da286d41b402ba7581849377bde1843.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bcdf1786df4c2723d4abd1e3369b8d0b2b0e158ced0cffe8ca1f9320bee7817a +size 4817 diff --git a/data/2025/2503_13xxx/2503.13139/images/c61ac3848f91015fed47389a4b36e33e2e9c4471568eacab710f50bb62574e37.jpg b/data/2025/2503_13xxx/2503.13139/images/c61ac3848f91015fed47389a4b36e33e2e9c4471568eacab710f50bb62574e37.jpg new file mode 100644 index 0000000000000000000000000000000000000000..83a2e6abb9c21e654da748c3553683493644b4bd --- /dev/null +++ b/data/2025/2503_13xxx/2503.13139/images/c61ac3848f91015fed47389a4b36e33e2e9c4471568eacab710f50bb62574e37.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:252f1b8fe91d2dcc8587ba75478f8b9fb780574df0fbd573db2b3182426882d4 +size 9350 diff --git a/data/2025/2503_13xxx/2503.13139/images/cb19cfb9dbee0c99a276e2b549faec542655bf11e4318afd9cb3e3b30c96200d.jpg b/data/2025/2503_13xxx/2503.13139/images/cb19cfb9dbee0c99a276e2b549faec542655bf11e4318afd9cb3e3b30c96200d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..89169ed1ae77d24acb8b6be23b19cff004ad0ead --- /dev/null +++ b/data/2025/2503_13xxx/2503.13139/images/cb19cfb9dbee0c99a276e2b549faec542655bf11e4318afd9cb3e3b30c96200d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fca6e6755be4d31a522329d9a5475ece001f649931ebd42e2a5cae804e4d17f7 +size 36601 diff --git a/data/2025/2503_13xxx/2503.13139/images/ccaa6d3ddd7d2546164640711ddc2250c2b915f71d03072139e00436c2ce2805.jpg b/data/2025/2503_13xxx/2503.13139/images/ccaa6d3ddd7d2546164640711ddc2250c2b915f71d03072139e00436c2ce2805.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ddef617e0d8d1c0462471537e93a40683a2836c2 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13139/images/ccaa6d3ddd7d2546164640711ddc2250c2b915f71d03072139e00436c2ce2805.jpg @@ -0,0 
+1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:15f88bb277bba4ac873118846a90cc7e8141f2cf9ba4a6b077629f99926ea61c +size 7257 diff --git a/data/2025/2503_13xxx/2503.13139/images/d47a19ff9a05498992989851401417220f3bb6fb49d58def00dde2c82c804a66.jpg b/data/2025/2503_13xxx/2503.13139/images/d47a19ff9a05498992989851401417220f3bb6fb49d58def00dde2c82c804a66.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a0a67514faa9772b32f60d0c5cd34da0fa923053 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13139/images/d47a19ff9a05498992989851401417220f3bb6fb49d58def00dde2c82c804a66.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:88912ced1cf987555dbe6283019f6635bd119e5bc0ef47b1ab6fc030ccf0da27 +size 75428 diff --git a/data/2025/2503_13xxx/2503.13139/images/dca7b5107d57502a594576fb2a7086ecdb83f65dea7c30f15ae11f64aeee5585.jpg b/data/2025/2503_13xxx/2503.13139/images/dca7b5107d57502a594576fb2a7086ecdb83f65dea7c30f15ae11f64aeee5585.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f0e50c46b61a52332a9ab17c0d08645bbeed4f90 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13139/images/dca7b5107d57502a594576fb2a7086ecdb83f65dea7c30f15ae11f64aeee5585.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8d000b1e129fb06973d2333760289188348c981c266c4d6c56ce9d8cf7ddc880 +size 3320 diff --git a/data/2025/2503_13xxx/2503.13139/images/ea37ec53eb15b2a0ff0e8bdea1cc5566bbdc337da0a7d13b1c404d2c4da49c1e.jpg b/data/2025/2503_13xxx/2503.13139/images/ea37ec53eb15b2a0ff0e8bdea1cc5566bbdc337da0a7d13b1c404d2c4da49c1e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..019d303444581e7d7826ce501dcee991e25e962f --- /dev/null +++ b/data/2025/2503_13xxx/2503.13139/images/ea37ec53eb15b2a0ff0e8bdea1cc5566bbdc337da0a7d13b1c404d2c4da49c1e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8c21c6eb8f40c2011d14928d6ef77f81be31b66dd0cadff2a64e32832d8dc3cb +size 9934 diff --git a/data/2025/2503_13xxx/2503.13139/images/f4ce9972802824b1e904f47e93a3d933772e13b6530cb6d80f8fb6b800e4bad8.jpg b/data/2025/2503_13xxx/2503.13139/images/f4ce9972802824b1e904f47e93a3d933772e13b6530cb6d80f8fb6b800e4bad8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0bb75690d8501d9171000d44b61e8f12dc0e9134 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13139/images/f4ce9972802824b1e904f47e93a3d933772e13b6530cb6d80f8fb6b800e4bad8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:35287a62dab60ca8633c221a36c18678221b638034d367dabc5412803fc3c929 +size 42585 diff --git a/data/2025/2503_13xxx/2503.13139/images/fdc82addde7f6fa12197b2b446eeb2d46ee9ef9d934d08e9e4d05c2517b9e0f2.jpg b/data/2025/2503_13xxx/2503.13139/images/fdc82addde7f6fa12197b2b446eeb2d46ee9ef9d934d08e9e4d05c2517b9e0f2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..58ba91f2f758029663375777705e7689875be67d --- /dev/null +++ b/data/2025/2503_13xxx/2503.13139/images/fdc82addde7f6fa12197b2b446eeb2d46ee9ef9d934d08e9e4d05c2517b9e0f2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a15aea9aa4fabe0e2def53178f923c968ff6511fec4d192a66e4bfb26e8b7d13 +size 6745 diff --git a/data/2025/2503_13xxx/2503.13139/layout.json b/data/2025/2503_13xxx/2503.13139/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..8137b3f6a0f03f1d9c7fe650491acf21f7267ab1 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13139/layout.json @@ -0,0 +1,27103 @@ +{ + "pdf_info": [ + { + 
"para_blocks": [ + { + "bbox": [ + 138, + 97, + 475, + 159 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 97, + 475, + 159 + ], + "spans": [ + { + "bbox": [ + 138, + 97, + 475, + 159 + ], + "type": "text", + "content": "Logic-in-Frames: Dynamic Keyframe Search via Visual Semantic-Logical Verification for Long Video Understanding" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 144, + 188, + 468, + 210 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 144, + 188, + 468, + 210 + ], + "spans": [ + { + "bbox": [ + 144, + 188, + 468, + 210 + ], + "type": "text", + "content": "Weiyu Guo Ziyang Chen Shaoguang Wang Jianxiang He Yijie Xu AI Thrust, HKUST(GZ)" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 141, + 212, + 468, + 223 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 212, + 468, + 223 + ], + "spans": [ + { + "bbox": [ + 141, + 212, + 468, + 223 + ], + "type": "text", + "content": "{wguo395, zchen483, swang440, jhe307, yxu409}@connect.hkust-gz.edu.cn" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 211, + 224, + 253, + 235 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 211, + 224, + 253, + 235 + ], + "spans": [ + { + "bbox": [ + 211, + 224, + 253, + 235 + ], + "type": "text", + "content": "Jinhui Ye" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 182, + 236, + 281, + 247 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 182, + 236, + 281, + 247 + ], + "spans": [ + { + "bbox": [ + 182, + 236, + 281, + 247 + ], + "type": "text", + "content": "Shanghai AI Laboratory" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 187, + 248, + 278, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 187, + 248, + 278, + 258 + ], + "spans": [ + { + "bbox": [ + 187, + 248, + 278, + 258 + ], + "type": "text", + "content": "jinhuiyes@gmail.com" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 318, + 224, + 422, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 224, + 422, + 236 + ], + "spans": [ + { + "bbox": [ + 318, + 224, + 422, + 236 + ], + "type": "text", + "content": "Ying Sun* Hui Xiong*" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 321, + 236, + 420, + 246 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 321, + 236, + 420, + 246 + ], + "spans": [ + { + "bbox": [ + 321, + 236, + 420, + 246 + ], + "type": "text", + "content": "AI Thrust, HKUST(GZ)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 314, + 247, + 428, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 247, + 428, + 258 + ], + "spans": [ + { + "bbox": [ + 314, + 247, + 428, + 258 + ], + "type": "text", + "content": "{yings, xionghui}@ust.hk" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 281, + 266, + 329, + 278 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 281, + 266, + 329, + 278 + ], + "spans": [ + { + "bbox": [ + 281, + 266, + 329, + 278 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 140, + 285, + 471, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 285, + 471, + 483 + ], + "spans": [ + { + "bbox": [ + 140, + 285, + 471, + 483 + ], + "type": "text", + "content": "Understanding long video content is a complex endeavor that often relies on densely sampled frame captions or end-to-end feature selectors, yet these techniques commonly overlook the logical relationships between 
textual queries and visual elements. In practice, computational constraints necessitate coarse frame subsampling, a challenge analogous to \"finding a needle in a haystack.\" To address this issue, we introduce a semantics-driven search framework that reformulates keyframe selection under the paradigm of Visual Semantic-Logical Search. Specifically, we systematically define four fundamental logical dependencies: 1) spatial co-occurrence, 2) temporal proximity, 3) attribute dependency, and 4) causal order. These relations dynamically update frame sampling distributions through an iterative refinement process, enabling context-aware identification of semantically critical frames tailored to specific query requirements. Our method establishes new SOTA performance on the manually annotated benchmark in key-frame selection metrics. Furthermore, when applied to downstream video question-answering tasks, the proposed approach demonstrates the best performance gains over existing methods on LONGVIDEOBENCH and VIDEO-MME, validating its effectiveness in bridging the logical gap between textual queries and visual-temporal reasoning. The code will be publicly available." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 490, + 192, + 502 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 490, + 192, + 502 + ], + "spans": [ + { + "bbox": [ + 105, + 490, + 192, + 502 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 509, + 506, + 588 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 509, + 506, + 588 + ], + "spans": [ + { + "bbox": [ + 104, + 509, + 506, + 588 + ], + "type": "text", + "content": "Vision-Language Models (VLMs) Yin et al. (2024) have achieved remarkable progress in video understanding Zou et al. (2024); Tang et al. (2023), particularly in video question answering Wang et al. (2024c); Zhang et al. (2023), demonstrating potential for modeling real-world scenarios. However, existing methods can only simultaneously process a limited number of frames due to the inherent token limit and extremely high dimension of spatio-temporal video data, especially for long videos. Furthermore, uniformly sampled keyframes are query-agnostic and insufficient to represent query-related contents. To tackle these challenges, this paper addresses a pivotal research question:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 140, + 591, + 470, + 613 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 591, + 470, + 613 + ], + "spans": [ + { + "bbox": [ + 140, + 591, + 470, + 613 + ], + "type": "text", + "content": "How can we efficiently and accurately select keyframes that are semantically critical for answering video-based queries?" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 618, + 506, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 618, + 506, + 696 + ], + "spans": [ + { + "bbox": [ + 104, + 618, + 506, + 696 + ], + "type": "text", + "content": "We hypothesize that deconstructing visual semantic and logical cues (e.g., target objects, logical relations including temporal, spatial, attribute, and causal relationships between visual entities) from textual queries enables effective identification of task-relevant frames through heuristic sampling and search. 
Building on this insight, we propose Visual Semantic-Logical Search (VSLS), a novel keyframe search framework that incorporates target object confidence estimation and joint verification of visual semantic logic into the iterative update of frame sampling distribution and selects the most informative frames with the highest confidence. Experimental results show that our" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 117, + 702, + 208, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 702, + 208, + 713 + ], + "spans": [ + { + "bbox": [ + 117, + 702, + 208, + 713 + ], + "type": "text", + "content": "*Corresponding authors." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 14, + 202, + 37, + 561 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 202, + 37, + 561 + ], + "spans": [ + { + "bbox": [ + 14, + 202, + 37, + 561 + ], + "type": "text", + "content": "arXiv:2503.13139v2 [cs.CV] 17 May 2025" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 731, + 139, + 742 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 731, + 139, + 742 + ], + "spans": [ + { + "bbox": [ + 105, + 731, + 139, + 742 + ], + "type": "text", + "content": "Preprint." + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 119, + 80, + 292, + 88 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 80, + 292, + 88 + ], + "spans": [ + { + "bbox": [ + 119, + 80, + 292, + 88 + ], + "type": "text", + "content": "Q: In the video, what color pen did the author use when he wrote" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 113, + 88, + 248, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 88, + 248, + 95 + ], + "spans": [ + { + "bbox": [ + 113, + 88, + 248, + 95 + ], + "type": "text", + "content": "\\\"guitar\\\" for the second time?"
+ } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 108, + 102, + 203, + 147 + ], + "blocks": [ + { + "bbox": [ + 108, + 71, + 139, + 79 + ], + "lines": [ + { + "bbox": [ + 108, + 71, + 139, + 79 + ], + "spans": [ + { + "bbox": [ + 108, + 71, + 139, + 79 + ], + "type": "text", + "content": "Temporal" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 113, + 95, + 138, + 100 + ], + "lines": [ + { + "bbox": [ + 113, + 95, + 138, + 100 + ], + "spans": [ + { + "bbox": [ + 113, + 95, + 138, + 100 + ], + "type": "text", + "content": "A) Brown" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 108, + 102, + 203, + 147 + ], + "lines": [ + { + "bbox": [ + 108, + 102, + 203, + 147 + ], + "spans": [ + { + "bbox": [ + 108, + 102, + 203, + 147 + ], + "type": "image", + "image_path": "953c9d83f6870c26dad9968e0c39a6e9242291e0ea0bf567b0874126d0a19571.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 110, + 148, + 133, + 156 + ], + "lines": [ + { + "bbox": [ + 110, + 148, + 133, + 156 + ], + "spans": [ + { + "bbox": [ + 110, + 148, + 133, + 156 + ], + "type": "text", + "content": "Spatial" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 211, + 102, + 304, + 147 + ], + "blocks": [ + { + "bbox": [ + 235, + 72, + 277, + 79 + ], + "lines": [ + { + "bbox": [ + 235, + 72, + 277, + 79 + ], + "spans": [ + { + "bbox": [ + 235, + 72, + 277, + 79 + ], + "type": "text", + "content": "(text, time, pen)" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 211, + 95, + 236, + 100 + ], + "lines": [ + { + "bbox": [ + 211, + 95, + 236, + 100 + ], + "spans": [ + { + "bbox": [ + 211, + 95, + 236, + 100 + ], + "type": "text", + "content": "B) Pink" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 211, + 102, + 304, + 147 + ], + "lines": [ + { + "bbox": [ + 211, + 102, + 304, + 147 + ], + "spans": [ + { + "bbox": [ + 211, + 102, + 304, + 147 + ], + "type": "image", + "image_path": "ac70dfed2f93f2e57ec2e765bb99ce5996cc84efe94f743f0640b0f5b681d414.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 211, + 148, + 301, + 156 + ], + "lines": [ + { + "bbox": [ + 211, + 148, + 301, + 156 + ], + "spans": [ + { + "bbox": [ + 211, + 148, + 301, + 156 + ], + "type": "text", + "content": "(copilot, spatial, Egyptian Pyramids)" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 108, + 158, + 303, + 166 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 158, + 303, + 166 + ], + "spans": [ + { + "bbox": [ + 108, + 158, + 303, + 166 + ], + "type": "text", + "content": "Q:At the end of the animation, which building does the airplane fly over?" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 113, + 166, + 168, + 172 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 166, + 168, + 172 + ], + "spans": [ + { + "bbox": [ + 113, + 166, + 168, + 172 + ], + "type": "text", + "content": "A) The Eiffel Tower." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 214, + 166, + 280, + 172 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 214, + 166, + 280, + 172 + ], + "spans": [ + { + "bbox": [ + 214, + 166, + 280, + 172 + ], + "type": "text", + "content": "B) The Egyptian Pyramids" + } + ] + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 108, + 177, + 204, + 264 + ], + "blocks": [ + { + "bbox": [ + 108, + 177, + 204, + 264 + ], + "lines": [ + { + "bbox": [ + 108, + 177, + 204, + 264 + ], + "spans": [ + { + "bbox": [ + 108, + 177, + 204, + 264 + ], + "type": "image", + "image_path": "2cbbe9459cd07a11b8b6cf63240b6eb53b964cbf2cfd943bf25bbe734517e78c.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 113, + 264, + 178, + 270 + ], + "lines": [ + { + "bbox": [ + 113, + 264, + 178, + 270 + ], + "spans": [ + { + "bbox": [ + 113, + 264, + 178, + 270 + ], + "type": "text", + "content": "RED—Baseline Answer" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 209, + 177, + 305, + 265 + ], + "blocks": [ + { + "bbox": [ + 209, + 177, + 305, + 265 + ], + "lines": [ + { + "bbox": [ + 209, + 177, + 305, + 265 + ], + "spans": [ + { + "bbox": [ + 209, + 177, + 305, + 265 + ], + "type": "image", + "image_path": "38acc18505c76dd154f63dcc30a60695263952fd5e1c996c5dbaf6981479fc57.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 214, + 264, + 247, + 270 + ], + "lines": [ + { + "bbox": [ + 214, + 264, + 247, + 270 + ], + "spans": [ + { + "bbox": [ + 214, + 264, + 247, + 270 + ], + "type": "text", + "content": "Our Answer" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 275, + 506, + 319 + ], + "lines": [ + { + "bbox": [ + 104, + 275, + 506, + 319 + ], + "spans": [ + { + "bbox": [ + 104, + 275, + 506, + 319 + ], + "type": "text", + "content": "Figure 1: Examples of four types of visual semantic-logical relationships in video QA detected by our VSLS framework: Temporal (text, time, pen), Attribute (man, attribute, white shirt), Spatial (copilot, spatial, Egyptian Pyramids), and Causal (man, causal, basketball). Green boxes indicate correct answers, while red boxes show baseline errors." + } + ] + } + ], + "index": 31, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "bbox": [ + 318, + 79, + 492, + 94 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 79, + 492, + 94 + ], + "spans": [ + { + "bbox": [ + 318, + 79, + 492, + 94 + ], + "type": "text", + "content": "Q: In a room with a wall tiger and a map on the wall, there is a man wearing a white shirt. What is he doing?" 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 311, + 95, + 383, + 101 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 95, + 383, + 101 + ], + "spans": [ + { + "bbox": [ + 311, + 95, + 383, + 101 + ], + "type": "text", + "content": "A) gazing at a circuit board" + } + ] + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 308, + 102, + 402, + 147 + ], + "blocks": [ + { + "bbox": [ + 307, + 72, + 337, + 79 + ], + "lines": [ + { + "bbox": [ + 307, + 72, + 337, + 79 + ], + "spans": [ + { + "bbox": [ + 307, + 72, + 337, + 79 + ], + "type": "text", + "content": "Attribute" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 308, + 102, + 402, + 147 + ], + "lines": [ + { + "bbox": [ + 308, + 102, + 402, + 147 + ], + "spans": [ + { + "bbox": [ + 308, + 102, + 402, + 147 + ], + "type": "image", + "image_path": "ea37ec53eb15b2a0ff0e8bdea1cc5566bbdc337da0a7d13b1c404d2c4da49c1e.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 310, + 148, + 331, + 156 + ], + "lines": [ + { + "bbox": [ + 310, + 148, + 331, + 156 + ], + "spans": [ + { + "bbox": [ + 310, + 148, + 331, + 156 + ], + "type": "text", + "content": "Causal" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_caption" + } + ], + "index": 22 + }, + { + "type": "image", + "bbox": [ + 403, + 102, + 501, + 148 + ], + "blocks": [ + { + "bbox": [ + 417, + 72, + 491, + 78 + ], + "lines": [ + { + "bbox": [ + 417, + 72, + 491, + 78 + ], + "spans": [ + { + "bbox": [ + 417, + 72, + 491, + 78 + ], + "type": "text", + "content": "(man, attribute, white shirt)" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 410, + 95, + 440, + 101 + ], + "lines": [ + { + "bbox": [ + 410, + 95, + 440, + 101 + ], + "spans": [ + { + "bbox": [ + 410, + 95, + 440, + 101 + ], + "type": "text", + "content": "B) speaking" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 403, + 102, + 501, + 148 + ], + "lines": [ + { + "bbox": [ + 403, + 102, + 501, + 148 + ], + "spans": [ + { + "bbox": [ + 403, + 102, + 501, + 148 + ], + "type": "image", + "image_path": "c2bb1f52e9e19170d0c025e96d90330f57871bc152ba1bbb27307286e8aafd7e.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 419, + 148, + 485, + 155 + ], + "lines": [ + { + "bbox": [ + 419, + 148, + 485, + 155 + ], + "spans": [ + { + "bbox": [ + 419, + 148, + 485, + 155 + ], + "type": "text", + "content": "(man, causal, basketball)" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_caption" + } + ], + "index": 23 + }, + { + "bbox": [ + 309, + 156, + 501, + 163 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 156, + 501, + 163 + ], + "spans": [ + { + "bbox": [ + 309, + 156, + 501, + 163 + ], + "type": "text", + "content": "Q:After a man wearing a red short-sleeved shirt and a black hat finished" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 317, + 163, + 471, + 169 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 163, + 471, + 169 + ], + "spans": [ + { + "bbox": [ + 317, + 163, + 471, + 169 + ], + "type": "text", + "content": "speaking in front of a black background, what did this me" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 310, + 169, + 386, + 176 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 169, + 386, + 176 + ], + "spans": [ + { + "bbox": [ 
+ 310, + 169, + 386, + 176 + ], + "type": "text", + "content": "A) picked up a mobile phone." + } + ] + } + ], + "index": 28 + }, + { + "type": "image", + "bbox": [ + 308, + 177, + 403, + 265 + ], + "blocks": [ + { + "bbox": [ + 308, + 177, + 403, + 265 + ], + "lines": [ + { + "bbox": [ + 308, + 177, + 403, + 265 + ], + "spans": [ + { + "bbox": [ + 308, + 177, + 403, + 265 + ], + "type": "image", + "image_path": "94231a5d2a0704895637946eabe85545b336071375abcfbbf71d4801fdb3ce38.jpg" + } + ] + } + ], + "index": 29, + "angle": 0, + "type": "image_body" + } + ], + "index": 29 + }, + { + "type": "image", + "bbox": [ + 405, + 177, + 501, + 265 + ], + "blocks": [ + { + "bbox": [ + 405, + 177, + 501, + 265 + ], + "lines": [ + { + "bbox": [ + 405, + 177, + 501, + 265 + ], + "spans": [ + { + "bbox": [ + 405, + 177, + 501, + 265 + ], + "type": "image", + "image_path": "c61ac3848f91015fed47389a4b36e33e2e9c4471568eacab710f50bb62574e37.jpg" + } + ] + } + ], + "index": 30, + "angle": 0, + "type": "image_body" + } + ], + "index": 30 + }, + { + "bbox": [ + 104, + 336, + 504, + 370 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 336, + 504, + 370 + ], + "spans": [ + { + "bbox": [ + 104, + 336, + 504, + 370 + ], + "type": "text", + "content": "approach requires only sparse sampling (1.4% of frames per video on average) to identify critical frames, significantly reducing computational complexity compared to conventional dense sampling strategies while maintaining performance on downstream video understanding tasks." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 104, + 374, + 506, + 463 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 374, + 506, + 463 + ], + "spans": [ + { + "bbox": [ + 104, + 374, + 506, + 463 + ], + "type": "text", + "content": "Compared to conventional methods, VSLS shows three distinct advantages. First, the framework is training-free and highly efficient in comparison with dense captioning Chen et al. (2024c); Kim et al. (2024); Wang et al. (2024b) or video clustering Wang et al. (2024e); Rajan and Parameswaran (2025) strategies, sampling only " + }, + { + "bbox": [ + 104, + 374, + 506, + 463 + ], + "type": "inline_equation", + "content": "1.4\\%" + }, + { + "bbox": [ + 104, + 374, + 506, + 463 + ], + "type": "text", + "content": " of frames on average in LVHAYSTACK. Second, it explicitly models logical binary relations (namely spatial, temporal, attribute, and causal) in the query beyond simple target detection Ye et al. (2025b), utilizing additional visual semantic features and enhancing logical consistency throughout the reasoning process. Third, VSLS is a plug-and-play module, which can be seamlessly integrated into existing VLM pipelines without cross-component dependencies." + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 104, + 468, + 506, + 611 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 468, + 506, + 611 + ], + "spans": [ + { + "bbox": [ + 104, + 468, + 506, + 611 + ], + "type": "text", + "content": "We further examine VSLS on several public datasets, including LONGVIDEOBENCH Ye et al. (2025a), a comprehensive benchmark for long video understanding; VIDEO-MME Fu et al. (2024), a widely adopted multimodal video question answering dataset; and HAYSTACK-LVBENCH Ye et al. (2025a) with meticulously annotated keyframes based on human feedback for more precise analysis. 
Extensive experiments demonstrate significant improvements in both the semantic similarity and temporal coverage between the retrieved keyframes and the ground truth labels, as well as the accuracy in downstream video question-answering tasks. More importantly, with only " + }, + { + "bbox": [ + 104, + 468, + 506, + 611 + ], + "type": "inline_equation", + "content": "1.4\\%" + }, + { + "bbox": [ + 104, + 468, + 506, + 611 + ], + "type": "text", + "content": " of video frames (EGO4D Grauman et al. (2022)) sampled in the search iteration, our method achieves an " + }, + { + "bbox": [ + 104, + 468, + 506, + 611 + ], + "type": "inline_equation", + "content": "8.7\\%" + }, + { + "bbox": [ + 104, + 468, + 506, + 611 + ], + "type": "text", + "content": " improvement in GPT-4o Hurst et al. (2024)'s long video QA accuracy. This performance gain is attributed to our simple yet powerful observation: query-guided visual semantic logic retrieval can mitigate the gap between potential visual logic in video frames and the logic expressed in the query. To be specific, constructing ternary logic triplets with visual elements (e.g., object1, logic type, object2) can enhance downstream reasoning capabilities when performing textual-visual retrieval." + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 104, + 615, + 504, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 615, + 504, + 647 + ], + "spans": [ + { + "bbox": [ + 104, + 615, + 504, + 647 + ], + "type": "text", + "content": "To the best of our knowledge, we are arguably the first to search for keyframes in long videos by detecting visual semantic logic, with potential extensions to other textual-visual retrieval tasks. Our main contributions are as follows:" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 106, + 652, + 505, + 731 + ], + "type": "list", + "angle": 0, + "index": 39, + "blocks": [ + { + "bbox": [ + 106, + 652, + 505, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 652, + 505, + 673 + ], + "spans": [ + { + "bbox": [ + 106, + 652, + 505, + 673 + ], + "type": "text", + "content": "- We define four fundamental types of semantic logic relations in video QA tasks, including temporal, causal, attribute, and spatial relations, which can be accurately detected across various datasets." + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 106, + 674, + 504, + 697 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 674, + 504, + 697 + ], + "spans": [ + { + "bbox": [ + 106, + 674, + 504, + 697 + ], + "type": "text", + "content": "- We sample only " + }, + { + "bbox": [ + 106, + 674, + 504, + 697 + ], + "type": "inline_equation", + "content": "1.4\\%" + }, + { + "bbox": [ + 106, + 674, + 504, + 697 + ], + "type": "text", + "content": " of frames on average during keyframe search through heuristic sampling and distribution updating by different visual semantics and logical relations." + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 106, + 698, + 505, + 731 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 698, + 505, + 731 + ], + "spans": [ + { + "bbox": [ + 106, + 698, + 505, + 731 + ], + "type": "text", + "content": "- We comprehensively evaluate retrieval efficiency, semantic similarity, temporal coverage, and video question answering accuracy across several widely used video understanding datasets, demonstrating significant improvements in downstream tasks."
+ } + ] + } + ], + "index": 38 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 40 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 107, + 70, + 504, + 215 + ], + "blocks": [ + { + "bbox": [ + 107, + 70, + 504, + 215 + ], + "lines": [ + { + "bbox": [ + 107, + 70, + 504, + 215 + ], + "spans": [ + { + "bbox": [ + 107, + 70, + 504, + 215 + ], + "type": "image", + "image_path": "b122957dd055813357d936dff056404604c37dce8079ebfdf60a5643e31f325f.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 216, + 504, + 270 + ], + "lines": [ + { + "bbox": [ + 104, + 216, + 504, + 270 + ], + "spans": [ + { + "bbox": [ + 104, + 216, + 504, + 270 + ], + "type": "text", + "content": "Figure 2: Our VSLS Framework for Efficient Keyframe Selection. VSLS sparsely samples frames and selects key ones via object detection and logic verification. Steps: 1) Use LLM & VLM to extract cue/target objects and four logic types (spatial, temporal, attribute, causal); 2) Adaptive sampling with evolving confidence; 3) Detect objects via YOLO-WORLD; 4) Fuse scores with a spline function to identify high-confidence frames for downstream tasks." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 275, + 167, + 288 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 275, + 167, + 288 + ], + "spans": [ + { + "bbox": [ + 105, + 275, + 167, + 288 + ], + "type": "text", + "content": "2 Method" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 293, + 506, + 358 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 293, + 506, + 358 + ], + "spans": [ + { + "bbox": [ + 104, + 293, + 506, + 358 + ], + "type": "text", + "content": "Although existing long-context VLM frameworks implement keyframe search for video QA tasks Liang et al. (2024); Park et al. (2024); Tan et al. (2024); Wang et al. (2024a,d); Yu et al. (2024), their computational efficiency and searching accuracy remain suboptimal. To address this needle-in-a-haystack challenge Wang et al. (2025); Zhao et al. (2024), we propose a novel method VSLS that aligns the semantic relations between the text modality and video modality, enhancing the plausibility of logical reasoning and performance of downstream tasks."
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 367, + 207, + 377 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 367, + 207, + 377 + ], + "spans": [ + { + "bbox": [ + 105, + 367, + 207, + 377 + ], + "type": "text", + "content": "2.1 Task Formulation" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 380, + 504, + 415 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 380, + 504, + 415 + ], + "spans": [ + { + "bbox": [ + 104, + 380, + 504, + 415 + ], + "type": "text", + "content": "Given a video sequence " + }, + { + "bbox": [ + 104, + 380, + 504, + 415 + ], + "type": "inline_equation", + "content": "V = \\{f_t\\}_{t=1}^{N_v}" + }, + { + "bbox": [ + 104, + 380, + 504, + 415 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 104, + 380, + 504, + 415 + ], + "type": "inline_equation", + "content": "N_v" + }, + { + "bbox": [ + 104, + 380, + 504, + 415 + ], + "type": "text", + "content": " frames and a query " + }, + { + "bbox": [ + 104, + 380, + 504, + 415 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 380, + 504, + 415 + ], + "type": "text", + "content": ", the ideal temporal search framework aims to retrieve the minimal keyframe subset " + }, + { + "bbox": [ + 104, + 380, + 504, + 415 + ], + "type": "inline_equation", + "content": "V^K = \\{f_{m_i}\\}_{i=1}^K \\subseteq V" + }, + { + "bbox": [ + 104, + 380, + 504, + 415 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 104, + 380, + 504, + 415 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 380, + 504, + 415 + ], + "type": "text", + "content": " keyframes that satisfies:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 419, + 505, + 465 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 105, + 419, + 505, + 442 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 419, + 505, + 442 + ], + "spans": [ + { + "bbox": [ + 105, + 419, + 505, + 442 + ], + "type": "text", + "content": "- Conservation: The keyframe subset " + }, + { + "bbox": [ + 105, + 419, + 505, + 442 + ], + "type": "inline_equation", + "content": "V^K \\subseteq V" + }, + { + "bbox": [ + 105, + 419, + 505, + 442 + ], + "type": "text", + "content": " must satisfy the answer consistency condition: " + }, + { + "bbox": [ + 105, + 419, + 505, + 442 + ], + "type": "inline_equation", + "content": "\\mathcal{A}(V^K, Q) = \\mathcal{A}(V, Q)" + }, + { + "bbox": [ + 105, + 419, + 505, + 442 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 105, + 419, + 505, + 442 + ], + "type": "inline_equation", + "content": "\\mathcal{A}(\\cdot)" + }, + { + "bbox": [ + 105, + 419, + 505, + 442 + ], + "type": "text", + "content": " denotes the video QA function." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 442, + 504, + 465 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 442, + 504, + 465 + ], + "spans": [ + { + "bbox": [ + 105, + 442, + 504, + 465 + ], + "type": "text", + "content": "- Compactness: " + }, + { + "bbox": [ + 105, + 442, + 504, + 465 + ], + "type": "inline_equation", + "content": "V^K" + }, + { + "bbox": [ + 105, + 442, + 504, + 465 + ], + "type": "text", + "content": " must be a minimal subset that preserves completeness, which means that no frame in " + }, + { + "bbox": [ + 105, + 442, + 504, + 465 + ], + "type": "inline_equation", + "content": "V^K" + }, + { + "bbox": [ + 105, + 442, + 504, + 465 + ], + "type": "text", + "content": " can be removed without hindering the accuracy and efficiency of video QA." + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 105, + 472, + 273, + 483 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 472, + 273, + 483 + ], + "spans": [ + { + "bbox": [ + 105, + 472, + 273, + 483 + ], + "type": "text", + "content": "2.2 Visual Semantic Logic Extraction" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 487, + 506, + 512 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 487, + 506, + 512 + ], + "spans": [ + { + "bbox": [ + 104, + 487, + 506, + 512 + ], + "type": "text", + "content": "Starting from a question " + }, + { + "bbox": [ + 104, + 487, + 506, + 512 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 487, + 506, + 512 + ], + "type": "text", + "content": " and uniformly sampled frames " + }, + { + "bbox": [ + 104, + 487, + 506, + 512 + ], + "type": "inline_equation", + "content": "\\overline{V}_N" + }, + { + "bbox": [ + 104, + 487, + 506, + 512 + ], + "type": "text", + "content": " from video " + }, + { + "bbox": [ + 104, + 487, + 506, + 512 + ], + "type": "inline_equation", + "content": "V" + }, + { + "bbox": [ + 104, + 487, + 506, + 512 + ], + "type": "text", + "content": ", our goal is to extract key visual elements to answer " + }, + { + "bbox": [ + 104, + 487, + 506, + 512 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 487, + 506, + 512 + ], + "type": "text", + "content": ". We first classify the detected objects in " + }, + { + "bbox": [ + 104, + 487, + 506, + 512 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 487, + 506, + 512 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 487, + 506, + 512 + ], + "type": "inline_equation", + "content": "\\overline{V}_N" + }, + { + "bbox": [ + 104, + 487, + 506, + 512 + ], + "type": "text", + "content": " into two categories:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 514, + 505, + 560 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 105, + 514, + 505, + 537 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 514, + 505, + 537 + ], + "spans": [ + { + "bbox": [ + 105, + 514, + 505, + 537 + ], + "type": "text", + "content": "- Key Objects: The main participants or references in the scene that the question explicitly or implicitly focuses on (e.g., \"person\", \"microphone\")." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 537, + 504, + 560 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 537, + 504, + 560 + ], + "spans": [ + { + "bbox": [ + 105, + 537, + 504, + 560 + ], + "type": "text", + "content": "- Cue Objects: Secondary or contextual entities that help locate or disambiguate the Key Objects (e.g., \"book\", \"tiger painting\")." + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 563, + 504, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 563, + 504, + 597 + ], + "spans": [ + { + "bbox": [ + 104, + 563, + 504, + 597 + ], + "type": "text", + "content": "To further leverage semantic and logical links among these objects, we define a set of relations " + }, + { + "bbox": [ + 104, + 563, + 504, + 597 + ], + "type": "inline_equation", + "content": "\\mathcal{R} \\subseteq \\mathcal{O} \\times \\Delta \\times \\mathcal{O}" + }, + { + "bbox": [ + 104, + 563, + 504, + 597 + ], + "type": "text", + "content": ", where each relation " + }, + { + "bbox": [ + 104, + 563, + 504, + 597 + ], + "type": "inline_equation", + "content": "r = (o_i, \\delta, o_j) \\in \\mathcal{R}" + }, + { + "bbox": [ + 104, + 563, + 504, + 597 + ], + "type": "text", + "content": ", with " + }, + { + "bbox": [ + 104, + 563, + 504, + 597 + ], + "type": "inline_equation", + "content": "o_i, o_j \\in \\mathcal{O}" + }, + { + "bbox": [ + 104, + 563, + 504, + 597 + ], + "type": "text", + "content": " denoting detected objects in the key and cue objects dataset, and " + }, + { + "bbox": [ + 104, + 563, + 504, + 597 + ], + "type": "inline_equation", + "content": "\\delta \\in \\Delta" + }, + { + "bbox": [ + 104, + 563, + 504, + 597 + ], + "type": "text", + "content": " representing one of the following types of relations:" + } + ] + } + ], + "index": 14 + }, + { + "type": "table", + "bbox": [ + 106, + 599, + 501, + 720 + ], + "blocks": [ + { + "bbox": [ + 106, + 599, + 501, + 720 + ], + "lines": [ + { + "bbox": [ + 106, + 599, + 501, + 720 + ], + "spans": [ + { + "bbox": [ + 106, + 599, + 501, + 720 + ], + "type": "table", + "html": "
<tr><th>Spatial Co-occurrence</th><th>Attribute Dependency</th></tr>
<tr><td>o<sub>i</sub> and o<sub>j</sub> appear in the same frame, indicating co-occurrence or proximity.<br>Example: “A person is standing beside a vase.” ⇒ (person, spatial, vase)</td><td>o<sub>i</sub> and o<sub>j</sub> share visual properties, e.g., color or size.<br>Example: “A person wears a black shirt.” ⇒ (person, attribute, black shirt)</td></tr>
<tr><th>Temporal Proximity</th><th>Causal Order</th></tr>
<tr><td>o<sub>i</sub> and o<sub>j</sub> occur in close frames, linking sequences or transitions.<br>Example: “After a dog entered the room, a cat entered.” ⇒ (dog, temporal, cat)</td><td>o<sub>i</sub> and o<sub>j</sub> follow a cause-effect or prerequisite order.<br>Example: “A little girl broke the vase.” ⇒ (little girl, causal, pieces)</td></tr>
", + "image_path": "d47a19ff9a05498992989851401417220f3bb6fb49d58def00dde2c82c804a66.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "table_body" + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "code", + "bbox": [ + 106, + 88, + 504, + 292 + ], + "blocks": [ + { + "bbox": [ + 106, + 75, + 306, + 87 + ], + "lines": [ + { + "bbox": [ + 106, + 75, + 306, + 87 + ], + "spans": [ + { + "bbox": [ + 106, + 75, + 306, + 87 + ], + "type": "text", + "content": "Algorithm 1: Visual Semantic-Logical Search" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 106, + 88, + 504, + 292 + ], + "lines": [ + { + "bbox": [ + 106, + 88, + 504, + 292 + ], + "spans": [ + { + "bbox": [ + 106, + 88, + 504, + 292 + ], + "type": "text", + "content": "Function SemanticLogicalTemporalSearch(V,Q,K, " + }, + { + "bbox": [ + 106, + 88, + 504, + 292 + ], + "type": "inline_equation", + "content": "\\Delta_t,\\tau ,\\alpha ,\\gamma" + }, + { + "bbox": [ + 106, + 88, + 504, + 292 + ], + "type": "inline_equation", + "content": "\\mathcal{O},\\mathcal{R}\\gets" + }, + { + "bbox": [ + 106, + 88, + 504, + 292 + ], + "type": "text", + "content": " ParseQuestion(Q) // Extract key/cue objects and relations \n" + }, + { + "bbox": [ + 106, + 88, + 504, + 292 + ], + "type": "inline_equation", + "content": "P\\leftarrow" + }, + { + "bbox": [ + 106, + 88, + 504, + 292 + ], + "type": "text", + "content": " Uniform, " + }, + { + "bbox": [ + 106, + 88, + 504, + 292 + ], + "type": "inline_equation", + "content": "B\\leftarrow |V|,S\\leftarrow \\emptyset ,N_{v}\\leftarrow |V|" + }, + { + "bbox": [ + 106, + 88, + 504, + 292 + ], + "type": "text", + "content": " // Initialize distribution and state \nwhile " + }, + { + "bbox": [ + 106, + 88, + 504, + 292 + ], + "type": "inline_equation", + "content": "B > 0" + }, + { + "bbox": [ + 106, + 88, + 504, + 292 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 106, + 88, + 504, + 292 + ], + "type": "inline_equation", + "content": "|\\mathcal{O}| > 0" + }, + { + "bbox": [ + 106, + 88, + 504, + 292 + ], + "type": "text", + "content": " do \n" + }, + { + "bbox": [ + 106, + 88, + 504, + 292 + ], + "type": "inline_equation", + "content": "k\\gets \\lfloor \\sqrt{B}\\rfloor ,G\\gets" + }, + { + "bbox": [ + 106, + 88, + 504, + 292 + ], + "type": "text", + "content": " Grid(Sample " + }, + { + "bbox": [ + 106, + 88, + 504, + 292 + ], + "type": "inline_equation", + "content": "(P,k^2)" + }, + { + "bbox": [ + 106, + 88, + 504, + 292 + ], + "type": "text", + "content": ") // Adaptive grid sampling \n" + }, + { + "bbox": [ + 106, + 88, + 504, + 292 + ], + "type": "inline_equation", + "content": "\\Omega \\gets" + }, + { + "bbox": [ + 106, + 88, + 504, + 292 + ], + "type": "text", + "content": " DetectObjects(G) // Detect objects in sampled frames \nforeach " + }, + { + "bbox": [ + 106, + 88, + 504, + 292 + ], + "type": "inline_equation", + "content": "t\\in G" + }, + { + "bbox": [ + 106, + 88, + 504, + 292 + ], + "type": "text", + "content": " do \n" + }, + { + "bbox": [ + 106, + 88, + 504, + 292 + ], + "type": "inline_equation", + "content": "C_t\\gets" + }, + { + 
"bbox": [ + 106, + 88, + 504, + 292 + ], + "type": "text", + "content": " CalculateBaseScore( " + }, + { + "bbox": [ + 106, + 88, + 504, + 292 + ], + "type": "inline_equation", + "content": "\\Omega_t" + }, + { + "bbox": [ + 106, + 88, + 504, + 292 + ], + "type": "text", + "content": " ) // Base detection confidence \nforeach " + }, + { + "bbox": [ + 106, + 88, + 504, + 292 + ], + "type": "inline_equation", + "content": "r_{type}\\in \\mathcal{R}" + }, + { + "bbox": [ + 106, + 88, + 504, + 292 + ], + "type": "text", + "content": " do \n" + }, + { + "bbox": [ + 106, + 88, + 504, + 292 + ], + "type": "inline_equation", + "content": "\\delta \\gets" + }, + { + "bbox": [ + 106, + 88, + 504, + 292 + ], + "type": "text", + "content": " Processrelation(rtype, " + }, + { + "bbox": [ + 106, + 88, + 504, + 292 + ], + "type": "inline_equation", + "content": "\\Omega ,\\Delta_t,\\tau ,\\alpha ,\\gamma)" + }, + { + "bbox": [ + 106, + 88, + 504, + 292 + ], + "type": "text", + "content": " //relations require distinct processing \n" + }, + { + "bbox": [ + 106, + 88, + 504, + 292 + ], + "type": "inline_equation", + "content": "C_t\\gets C_t + \\delta" + }, + { + "bbox": [ + 106, + 88, + 504, + 292 + ], + "type": "text", + "content": " UpdateScores " + }, + { + "bbox": [ + 106, + 88, + 504, + 292 + ], + "type": "inline_equation", + "content": "(S,t,C_t)" + }, + { + "bbox": [ + 106, + 88, + 504, + 292 + ], + "type": "text", + "content": " //Update global score registry \nDiffuseScores(S,w) // Temporal context propagation \n" + }, + { + "bbox": [ + 106, + 88, + 504, + 292 + ], + "type": "inline_equation", + "content": "P\\gets" + }, + { + "bbox": [ + 106, + 88, + 504, + 292 + ], + "type": "text", + "content": " NormalizeDistribution(S), " + }, + { + "bbox": [ + 106, + 88, + 504, + 292 + ], + "type": "inline_equation", + "content": "B\\gets B - k^{2}" + }, + { + "bbox": [ + 106, + 88, + 504, + 292 + ], + "type": "text", + "content": " // Update sampling distribution \nforeach " + }, + { + "bbox": [ + 106, + 88, + 504, + 292 + ], + "type": "inline_equation", + "content": "g\\in \\mathrm{TopK}(S,K)" + }, + { + "bbox": [ + 106, + 88, + 504, + 292 + ], + "type": "text", + "content": " do \nif " + }, + { + "bbox": [ + 106, + 88, + 504, + 292 + ], + "type": "inline_equation", + "content": "\\Omega [g]\\cap \\mathcal{O}\\neq \\emptyset" + }, + { + "bbox": [ + 106, + 88, + 504, + 292 + ], + "type": "text", + "content": " then // Remove identified key objects \n" + }, + { + "bbox": [ + 106, + 88, + 504, + 292 + ], + "type": "inline_equation", + "content": "\\begin{array}{rlrl} & {\\mathcal{O}} & {\\leftarrow \\mathcal{O}\\backslash \\Omega [g]} & {} \\end{array}" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "code_body" + } + ], + "index": 1, + "sub_type": "algorithm" + }, + { + "bbox": [ + 104, + 316, + 506, + 416 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 316, + 506, + 416 + ], + "spans": [ + { + "bbox": [ + 104, + 316, + 506, + 416 + ], + "type": "text", + "content": "The choice of these four relations draws on core concepts in linguistics and logic Cohen (1968); Sowa (2000); Talmy (2000), which identify spatial, temporal, attributive, and causal aspects as fundamental for structuring, perceiving, and communicating information about events and states. For more details on this selection, please see appendix A for reference. As shown in Figure 1, we construct semantic-logical relations that support a broad range of question-answering tasks. 
Specifically, questions involving temporal queries (when does " + }, + { + "bbox": [ + 104, + 316, + 506, + 416 + ], + "type": "inline_equation", + "content": "X" + }, + { + "bbox": [ + 104, + 316, + 506, + 416 + ], + "type": "text", + "content": " happen?), causal reasoning (why did " + }, + { + "bbox": [ + 104, + 316, + 506, + 416 + ], + "type": "inline_equation", + "content": "Y" + }, + { + "bbox": [ + 104, + 316, + 506, + 416 + ], + "type": "text", + "content": " occur?\"), attribute dependence (What is the person wearing sunglasses doing?), or spatial constraints (Who is standing next to the red car?) can be answered more reliably by incorporating these structured relations and contextual cues." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 424, + 320, + 436 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 424, + 320, + 436 + ], + "spans": [ + { + "bbox": [ + 105, + 424, + 320, + 436 + ], + "type": "text", + "content": "2.3 Iterative Semantic-Logical Temporal Search" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 439, + 504, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 439, + 504, + 495 + ], + "spans": [ + { + "bbox": [ + 104, + 439, + 504, + 495 + ], + "type": "text", + "content": "Based on the extracted key and cue objects and their logic relations, our algorithm iteratively searches for keyframes through semantic and logical reasoning, including four main stages: Frame Sampling (Sec. 2.3.1), Object Detection and Scoring (Sec. 2.3.2), Visual Semantic Logic Detection (Sec. 2.3.3), and Distribution Update (Sec. 2.3.4). The pseudocode is shown in Algorithm 1, and Algorithm 2 provides a more detailed version." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 502, + 211, + 514 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 502, + 211, + 514 + ], + "spans": [ + { + "bbox": [ + 105, + 502, + 211, + 514 + ], + "type": "text", + "content": "2.3.1 Frame Sampling" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 516, + 504, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 516, + 504, + 559 + ], + "spans": [ + { + "bbox": [ + 104, + 516, + 504, + 559 + ], + "type": "text", + "content": "To accelerate the search process, we avoid exhaustively scanning all " + }, + { + "bbox": [ + 104, + 516, + 504, + 559 + ], + "type": "inline_equation", + "content": "N_v" + }, + { + "bbox": [ + 104, + 516, + 504, + 559 + ], + "type": "text", + "content": " video frames and instead employ a distributed sampling strategy. Let " + }, + { + "bbox": [ + 104, + 516, + 504, + 559 + ], + "type": "inline_equation", + "content": "N_v" + }, + { + "bbox": [ + 104, + 516, + 504, + 559 + ], + "type": "text", + "content": " denote the total number of frames in the video, and " + }, + { + "bbox": [ + 104, + 516, + 504, + 559 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 104, + 516, + 504, + 559 + ], + "type": "text", + "content": " be a uniformly initialized sampling distribution over all frames. 
The sampling process is then defined as:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 246, + 559, + 504, + 572 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 246, + 559, + 504, + 572 + ], + "spans": [ + { + "bbox": [ + 246, + 559, + 504, + 572 + ], + "type": "interline_equation", + "content": "I_{s} = \\operatorname{Sample}\\left(P \\odot N_{v}, N_{s}\\right), \\tag{1}", + "image_path": "7e6ec89ef236cb486a46fb294d0375b8275a409cdda4409bec158388efd8cfb3.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 575, + 504, + 609 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 575, + 504, + 609 + ], + "spans": [ + { + "bbox": [ + 104, + 575, + 504, + 609 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 575, + 504, + 609 + ], + "type": "inline_equation", + "content": "\\mathrm{Sample}(\\cdot ,N_s)" + }, + { + "bbox": [ + 104, + 575, + 504, + 609 + ], + "type": "text", + "content": " selects a subset of " + }, + { + "bbox": [ + 104, + 575, + 504, + 609 + ], + "type": "inline_equation", + "content": "N_{s}" + }, + { + "bbox": [ + 104, + 575, + 504, + 609 + ], + "type": "text", + "content": " frames according to the distribution " + }, + { + "bbox": [ + 104, + 575, + 504, + 609 + ], + "type": "inline_equation", + "content": "P\\odot N_v" + }, + { + "bbox": [ + 104, + 575, + 504, + 609 + ], + "type": "text", + "content": ". To further leverage the detecting ability of YOLO, we stack the sampled frames into a " + }, + { + "bbox": [ + 104, + 575, + 504, + 609 + ], + "type": "inline_equation", + "content": "k\\times k" + }, + { + "bbox": [ + 104, + 575, + 504, + 609 + ], + "type": "text", + "content": " grid, which imposes a constraint on the sample size " + }, + { + "bbox": [ + 104, + 575, + 504, + 609 + ], + "type": "inline_equation", + "content": "N_{s}" + }, + { + "bbox": [ + 104, + 575, + 504, + 609 + ], + "type": "text", + "content": ". Specifically, we require:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 227, + 614, + 504, + 628 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 227, + 614, + 504, + 628 + ], + "spans": [ + { + "bbox": [ + 227, + 614, + 504, + 628 + ], + "type": "interline_equation", + "content": "N_{s} \\in \\{k^{2} \\mid k \\in \\mathbb{Z}\\} \\quad \\text{and} \\quad N_{s} < N_{v}. \\tag{2}", + "image_path": "c3f0aa82c5f958d8aa46fb294d0375b8275a409cdda4409bec158388efd8cfb3.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 634, + 505, + 668 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 634, + 505, + 668 + ], + "spans": [ + { + "bbox": [ + 104, + 634, + 505, + 668 + ], + "type": "text", + "content": "In practice, this ensures that the number of sampled frames can be reshaped into a compact 2D grid for efficient processing. Although " + }, + { + "bbox": [ + 104, + 634, + 505, + 668 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 104, + 634, + 505, + 668 + ], + "type": "text", + "content": " is initially uniform, it can be adapted over multiple rounds of sampling to focus on frames of higher interest in the video."
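Eqs. (1)-(2) reduce to a few lines of NumPy. In the sketch below the (N_v, H, W, C) frame array, the seed, and the mosaic layout are assumptions for illustration, not the paper's code.

```python
import numpy as np

def sample_grid(frames, P, k, seed=0):
    """Eqs. (1)-(2): draw N_s = k*k indices from distribution P and tile
    the frames into one k x k mosaic so the detector can score them in a
    single pass. `frames` is assumed to be an (N_v, H, W, C) array."""
    n_v = len(frames)
    assert k * k < n_v, "Eq. (2): N_s = k^2 must stay below N_v"
    rng = np.random.default_rng(seed)
    idx = np.sort(rng.choice(n_v, size=k * k, replace=False, p=P))  # Eq. (1)
    h, w = frames.shape[1:3]
    grid = np.zeros((k * h, k * w) + frames.shape[3:], dtype=frames.dtype)
    for cell, t in enumerate(idx):
        r, c = divmod(cell, k)
        grid[r * h:(r + 1) * h, c * w:(c + 1) * w] = frames[t]
    return grid, idx
```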
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 674, + 264, + 687 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 674, + 264, + 687 + ], + "spans": [ + { + "bbox": [ + 105, + 674, + 264, + 687 + ], + "type": "text", + "content": "2.3.2 Object Detection and Scoring" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "type": "text", + "content": "In this stage, we construct the detection search space by taking the union of both key objects and cue objects. For each iteration, we detect objects on the " + }, + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "type": "inline_equation", + "content": "N_{s}" + }, + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "type": "text", + "content": " sampled frames using a lightweight model like YOLO-WORLD Cheng et al. (2024a) for high efficiency and score the frames based on detection" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 105, + 73, + 504, + 182 + ], + "blocks": [ + { + "bbox": [ + 105, + 73, + 504, + 182 + ], + "lines": [ + { + "bbox": [ + 105, + 73, + 504, + 182 + ], + "spans": [ + { + "bbox": [ + 105, + 73, + 504, + 182 + ], + "type": "image", + "image_path": "f4ce9972802824b1e904f47e93a3d933772e13b6530cb6d80f8fb6b800e4bad8.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 188, + 504, + 232 + ], + "lines": [ + { + "bbox": [ + 104, + 188, + 504, + 232 + ], + "spans": [ + { + "bbox": [ + 104, + 188, + 504, + 232 + ], + "type": "text", + "content": "Figure 3: Sample weight evolution under VSLS optimization for keyframe selection. Top: 16 iterations show progressive convergence toward Ground Truth (red). Bottom: 15 iterations demonstrate similar alignment. Yellow highlights indicate precise matches between algorithm outputs (green) and manual annotations." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 239, + 504, + 264 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 239, + 504, + 264 + ], + "spans": [ + { + "bbox": [ + 104, + 239, + 504, + 264 + ], + "type": "text", + "content": "confidence. 
Specifically, let " + }, + { + "bbox": [ + 104, + 239, + 504, + 264 + ], + "type": "inline_equation", + "content": "\\Omega_t" + }, + { + "bbox": [ + 104, + 239, + 504, + 264 + ], + "type": "text", + "content": " be the set of detected objects in the frame at time " + }, + { + "bbox": [ + 104, + 239, + 504, + 264 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 239, + 504, + 264 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 239, + 504, + 264 + ], + "type": "inline_equation", + "content": "c_o" + }, + { + "bbox": [ + 104, + 239, + 504, + 264 + ], + "type": "text", + "content": " the confidence of each detected object, and " + }, + { + "bbox": [ + 104, + 239, + 504, + 264 + ], + "type": "inline_equation", + "content": "w_o" + }, + { + "bbox": [ + 104, + 239, + 504, + 264 + ], + "type": "text", + "content": " the corresponding weight. We define the frame score as:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 262, + 268, + 505, + 287 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 262, + 268, + 505, + 287 + ], + "spans": [ + { + "bbox": [ + 262, + 268, + 505, + 287 + ], + "type": "interline_equation", + "content": "C _ {t} = \\max _ {o \\in \\Omega_ {t}} \\left(c _ {o} \\cdot w _ {o}\\right). \\tag {3}", + "image_path": "5ac759c399e4241d5fdbfd16a0f85ca6b782a2f2813aa2bbd1cc48e98d36fdae.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 293, + 506, + 318 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 293, + 506, + 318 + ], + "spans": [ + { + "bbox": [ + 104, + 293, + 506, + 318 + ], + "type": "text", + "content": "If the confidence score of any key object exceeds a predefined threshold, it is added to a list, thereby maintaining a record of frames where crucial targets have been identified for subsequent processing." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 323, + 277, + 335 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 323, + 277, + 335 + ], + "spans": [ + { + "bbox": [ + 105, + 323, + 277, + 335 + ], + "type": "text", + "content": "2.3.3 Visual Semantic Logic Detection" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 338, + 504, + 398 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 338, + 504, + 398 + ], + "spans": [ + { + "bbox": [ + 104, + 338, + 504, + 398 + ], + "type": "text", + "content": "Beyond individual object detection and frame-level scoring, we refine each frame's confidence score by modeling higher-order object relations. Let " + }, + { + "bbox": [ + 104, + 338, + 504, + 398 + ], + "type": "inline_equation", + "content": "\\mathcal{R}" + }, + { + "bbox": [ + 104, + 338, + 504, + 398 + ], + "type": "text", + "content": " be the set of relations, where each " + }, + { + "bbox": [ + 104, + 338, + 504, + 398 + ], + "type": "inline_equation", + "content": "r\\in \\mathcal{R}" + }, + { + "bbox": [ + 104, + 338, + 504, + 398 + ], + "type": "text", + "content": " involves a pair " + }, + { + "bbox": [ + 104, + 338, + 504, + 398 + ], + "type": "inline_equation", + "content": "(o_1,o_2)" + }, + { + "bbox": [ + 104, + 338, + 504, + 398 + ], + "type": "text", + "content": " and is labeled by a type " + }, + { + "bbox": [ + 104, + 338, + 504, + 398 + ], + "type": "inline_equation", + "content": "r_{\\mathrm{type}}" + }, + { + "bbox": [ + 104, + 338, + 504, + 398 + ], + "type": "text", + "content": ". 
Denote " + }, + { + "bbox": [ + 104, + 338, + 504, + 398 + ], + "type": "inline_equation", + "content": "C_t" + }, + { + "bbox": [ + 104, + 338, + 504, + 398 + ], + "type": "text", + "content": " as the confidence score at time " + }, + { + "bbox": [ + 104, + 338, + 504, + 398 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 338, + 504, + 398 + ], + "type": "text", + "content": ", with a global scaling factor " + }, + { + "bbox": [ + 104, + 338, + 504, + 398 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 104, + 338, + 504, + 398 + ], + "type": "text", + "content": " and a relation-specific weight " + }, + { + "bbox": [ + 104, + 338, + 504, + 398 + ], + "type": "inline_equation", + "content": "\\gamma_{r_{\\mathrm{type}}}" + }, + { + "bbox": [ + 104, + 338, + 504, + 398 + ], + "type": "text", + "content": " controlling each logic type's impact. The refined confidence " + }, + { + "bbox": [ + 104, + 338, + 504, + 398 + ], + "type": "inline_equation", + "content": "C_t^{(r)}" + }, + { + "bbox": [ + 104, + 338, + 504, + 398 + ], + "type": "text", + "content": " after applying relation " + }, + { + "bbox": [ + 104, + 338, + 504, + 398 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 104, + 338, + 504, + 398 + ], + "type": "text", + "content": " is defined as:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 258, + 404, + 505, + 422 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 258, + 404, + 505, + 422 + ], + "spans": [ + { + "bbox": [ + 258, + 404, + 505, + 422 + ], + "type": "interline_equation", + "content": "C _ {t} ^ {(r)} = C _ {t} + \\alpha \\cdot \\gamma_ {r _ {\\text {t y p e}}}. \\tag {4}", + "image_path": "0d565d9512a082fd672adf12ff6042cc2378eeb821c3f9581e7b4235f2cc1bcb.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 426, + 504, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 426, + 504, + 460 + ], + "spans": [ + { + "bbox": [ + 104, + 426, + 504, + 460 + ], + "type": "text", + "content": "Spatial Relation. A spatial relation enforces that two objects " + }, + { + "bbox": [ + 104, + 426, + 504, + 460 + ], + "type": "inline_equation", + "content": "o_1" + }, + { + "bbox": [ + 104, + 426, + 504, + 460 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 426, + 504, + 460 + ], + "type": "inline_equation", + "content": "o_2" + }, + { + "bbox": [ + 104, + 426, + 504, + 460 + ], + "type": "text", + "content": " must co-occur in the same frame. Let " + }, + { + "bbox": [ + 104, + 426, + 504, + 460 + ], + "type": "inline_equation", + "content": "\\Omega_t" + }, + { + "bbox": [ + 104, + 426, + 504, + 460 + ], + "type": "text", + "content": " be the set of detected objects in frame " + }, + { + "bbox": [ + 104, + 426, + 504, + 460 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 426, + 504, + 460 + ], + "type": "text", + "content": ". 
If both " + }, + { + "bbox": [ + 104, + 426, + 504, + 460 + ], + "type": "inline_equation", + "content": "o_1 \\in \\Omega_t" + }, + { + "bbox": [ + 104, + 426, + 504, + 460 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 426, + 504, + 460 + ], + "type": "inline_equation", + "content": "o_2 \\in \\Omega_t" + }, + { + "bbox": [ + 104, + 426, + 504, + 460 + ], + "type": "text", + "content": ", then the corresponding frame confidence is updated as:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 258, + 467, + 505, + 480 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 258, + 467, + 505, + 480 + ], + "spans": [ + { + "bbox": [ + 258, + 467, + 505, + 480 + ], + "type": "interline_equation", + "content": "C _ {t} \\leftarrow C _ {t} + \\alpha \\cdot \\gamma_ {\\text {s p a t i a l}}. \\tag {5}", + "image_path": "dca7b5107d57502a594576fb2a7086ecdb83f65dea7c30f15ae11f64aeee5585.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 486, + 504, + 532 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 486, + 504, + 532 + ], + "spans": [ + { + "bbox": [ + 104, + 486, + 504, + 532 + ], + "type": "text", + "content": "Attribute Relation. An attribute relation is satisfied when " + }, + { + "bbox": [ + 104, + 486, + 504, + 532 + ], + "type": "inline_equation", + "content": "o_1" + }, + { + "bbox": [ + 104, + 486, + 504, + 532 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 486, + 504, + 532 + ], + "type": "inline_equation", + "content": "o_2" + }, + { + "bbox": [ + 104, + 486, + 504, + 532 + ], + "type": "text", + "content": " share sufficient bounding-box overlap in the same frame. Let overlap be the ratio of their intersection area to the minimum of their individual bounding-box areas. If the overlap ratio exceeds a predefined threshold " + }, + { + "bbox": [ + 104, + 486, + 504, + 532 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 104, + 486, + 504, + 532 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 104, + 486, + 504, + 532 + ], + "type": "inline_equation", + "content": "\\tau = 0.5" + }, + { + "bbox": [ + 104, + 486, + 504, + 532 + ], + "type": "text", + "content": " in our experimental setting), we increase the frame confidence:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 255, + 538, + 505, + 550 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 255, + 538, + 505, + 550 + ], + "spans": [ + { + "bbox": [ + 255, + 538, + 505, + 550 + ], + "type": "interline_equation", + "content": "C _ {t} \\leftarrow C _ {t} + \\alpha \\cdot \\gamma_ {\\text {a t t r i b u t e}}. \\tag {6}", + "image_path": "5408beda2a39e51f3d8b63e6913b6b998eaf0668924dec183510cf1a30f61ce8.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 557, + 506, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 557, + 506, + 601 + ], + "spans": [ + { + "bbox": [ + 104, + 557, + 506, + 601 + ], + "type": "text", + "content": "Time Relation. A time relation checks whether two objects appear in temporally close frames. 
Suppose " + }, + { + "bbox": [ + 104, + 557, + 506, + 601 + ], + "type": "inline_equation", + "content": "t_i" + }, + { + "bbox": [ + 104, + 557, + 506, + 601 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 557, + 506, + 601 + ], + "type": "inline_equation", + "content": "t_j" + }, + { + "bbox": [ + 104, + 557, + 506, + 601 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 104, + 557, + 506, + 601 + ], + "type": "inline_equation", + "content": "t_i \\leq t_j" + }, + { + "bbox": [ + 104, + 557, + 506, + 601 + ], + "type": "text", + "content": ") are sampled such that " + }, + { + "bbox": [ + 104, + 557, + 506, + 601 + ], + "type": "inline_equation", + "content": "|t_j - t_i| < \\Delta_t" + }, + { + "bbox": [ + 104, + 557, + 506, + 601 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 557, + 506, + 601 + ], + "type": "inline_equation", + "content": "\\Delta_t" + }, + { + "bbox": [ + 104, + 557, + 506, + 601 + ], + "type": "text", + "content": " is a threshold (e.g. 5 frames in our experimental setting), if " + }, + { + "bbox": [ + 104, + 557, + 506, + 601 + ], + "type": "inline_equation", + "content": "o_1" + }, + { + "bbox": [ + 104, + 557, + 506, + 601 + ], + "type": "text", + "content": " occurs in frame " + }, + { + "bbox": [ + 104, + 557, + 506, + 601 + ], + "type": "inline_equation", + "content": "t_i" + }, + { + "bbox": [ + 104, + 557, + 506, + 601 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 557, + 506, + 601 + ], + "type": "inline_equation", + "content": "o_2" + }, + { + "bbox": [ + 104, + 557, + 506, + 601 + ], + "type": "text", + "content": " in frame " + }, + { + "bbox": [ + 104, + 557, + 506, + 601 + ], + "type": "inline_equation", + "content": "t_j" + }, + { + "bbox": [ + 104, + 557, + 506, + 601 + ], + "type": "text", + "content": ", then both frames' confidence are updated:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 206, + 608, + 505, + 622 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 206, + 608, + 505, + 622 + ], + "spans": [ + { + "bbox": [ + 206, + 608, + 505, + 622 + ], + "type": "interline_equation", + "content": "C _ {t _ {i}} \\leftarrow C _ {t _ {i}} + \\alpha \\cdot \\gamma_ {\\text {t i m e}}, \\quad C _ {t _ {j}} \\leftarrow C _ {t _ {j}} + \\alpha \\cdot \\gamma_ {\\text {t i m e}}. \\tag {7}", + "image_path": "befbb97408628da460b2c29aea40fd3443321e17d35cabd22a3efcca679f78c4.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 628, + 504, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 628, + 504, + 662 + ], + "spans": [ + { + "bbox": [ + 104, + 628, + 504, + 662 + ], + "type": "text", + "content": "Causal Relation. A causal relation models an ordering constraint, enforcing that " + }, + { + "bbox": [ + 104, + 628, + 504, + 662 + ], + "type": "inline_equation", + "content": "o_1" + }, + { + "bbox": [ + 104, + 628, + 504, + 662 + ], + "type": "text", + "content": " must appear at an earlier time than " + }, + { + "bbox": [ + 104, + 628, + 504, + 662 + ], + "type": "inline_equation", + "content": "o_2" + }, + { + "bbox": [ + 104, + 628, + 504, + 662 + ], + "type": "text", + "content": ". 
Specifically, if " + }, + { + "bbox": [ + 104, + 628, + 504, + 662 + ], + "type": "inline_equation", + "content": "o_1 \\in \\Omega_{t_i}" + }, + { + "bbox": [ + 104, + 628, + 504, + 662 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 628, + 504, + 662 + ], + "type": "inline_equation", + "content": "o_2 \\in \\Omega_{t_j}" + }, + { + "bbox": [ + 104, + 628, + 504, + 662 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 104, + 628, + 504, + 662 + ], + "type": "inline_equation", + "content": "t_i < t_j" + }, + { + "bbox": [ + 104, + 628, + 504, + 662 + ], + "type": "text", + "content": ", we update the confidence of frames " + }, + { + "bbox": [ + 104, + 628, + 504, + 662 + ], + "type": "inline_equation", + "content": "t_i" + }, + { + "bbox": [ + 104, + 628, + 504, + 662 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 628, + 504, + 662 + ], + "type": "inline_equation", + "content": "t_j" + }, + { + "bbox": [ + 104, + 628, + 504, + 662 + ], + "type": "text", + "content": " by:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 201, + 668, + 505, + 682 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 201, + 668, + 505, + 682 + ], + "spans": [ + { + "bbox": [ + 201, + 668, + 505, + 682 + ], + "type": "interline_equation", + "content": "C _ {t _ {i}} \\leftarrow C _ {t _ {i}} + \\alpha \\cdot \\gamma_ {\\text {c a u s a l}}, \\quad C _ {t _ {j}} \\leftarrow C _ {t _ {j}} + \\alpha \\cdot \\gamma_ {\\text {c a u s a l}}. \\tag {8}", + "image_path": "511325680ed4273ed05a5920b32ea94b3a9cbc28b135c165cbc68b931bd57695.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 688, + 505, + 731 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 688, + 505, + 731 + ], + "spans": [ + { + "bbox": [ + 104, + 688, + 505, + 731 + ], + "type": "text", + "content": "Through this scoring mechanism, frames with detected relations will have greater confidence and are more likely to be retrieved as keyframes for the given query and video. We have also conducted hyperparameter search experiments, and find that " + }, + { + "bbox": [ + 104, + 688, + 505, + 731 + ], + "type": "inline_equation", + "content": "\\alpha = 0.3" + }, + { + "bbox": [ + 104, + 688, + 505, + 731 + ], + "type": "text", + "content": " (from 0.3, 0.5, 0.7, 1.0) and " + }, + { + "bbox": [ + 104, + 688, + 505, + 731 + ], + "type": "inline_equation", + "content": "\\gamma_{r_{\\mathrm{type}}} = 0.5" + }, + { + "bbox": [ + 104, + 688, + 505, + 731 + ], + "type": "text", + "content": " achieve the best results across different datasets." 
+ } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 742, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 742, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 742, + 308, + 750 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 72, + 224, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 72, + 224, + 84 + ], + "spans": [ + { + "bbox": [ + 105, + 72, + 224, + 84 + ], + "type": "text", + "content": "2.3.4 Distribution Update" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 87, + 504, + 142 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 87, + 504, + 142 + ], + "spans": [ + { + "bbox": [ + 104, + 87, + 504, + 142 + ], + "type": "text", + "content": "After each iteration of frame sampling, we merge newly obtained frame confidences into the global score distribution " + }, + { + "bbox": [ + 104, + 87, + 504, + 142 + ], + "type": "inline_equation", + "content": "\\{S_f\\}" + }, + { + "bbox": [ + 104, + 87, + 504, + 142 + ], + "type": "text", + "content": " spanning all frames " + }, + { + "bbox": [ + 104, + 87, + 504, + 142 + ], + "type": "inline_equation", + "content": "f = 1,2,\\dots ,N_v" + }, + { + "bbox": [ + 104, + 87, + 504, + 142 + ], + "type": "text", + "content": ". When a frame " + }, + { + "bbox": [ + 104, + 87, + 504, + 142 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 87, + 504, + 142 + ], + "type": "text", + "content": " is selected for detection, its score is assigned to the confidence value " + }, + { + "bbox": [ + 104, + 87, + 504, + 142 + ], + "type": "inline_equation", + "content": "C_f" + }, + { + "bbox": [ + 104, + 87, + 504, + 142 + ], + "type": "text", + "content": ", and the visitation counter " + }, + { + "bbox": [ + 104, + 87, + 504, + 142 + ], + "type": "inline_equation", + "content": "N_{v,f}" + }, + { + "bbox": [ + 104, + 87, + 504, + 142 + ], + "type": "text", + "content": " is reset to 0. To incorporate temporal context, we diffuse this updated score to neighboring frames within a window of size " + }, + { + "bbox": [ + 104, + 87, + 504, + 142 + ], + "type": "inline_equation", + "content": "w" + }, + { + "bbox": [ + 104, + 87, + 504, + 142 + ], + "type": "text", + "content": ". Denoting each nearby index by " + }, + { + "bbox": [ + 104, + 87, + 504, + 142 + ], + "type": "inline_equation", + "content": "f\\pm \\delta" + }, + { + "bbox": [ + 104, + 87, + 504, + 142 + ], + "type": "text", + "content": " (for " + }, + { + "bbox": [ + 104, + 87, + 504, + 142 + ], + "type": "inline_equation", + "content": "\\delta \\in [-w,w]" + }, + { + "bbox": [ + 104, + 87, + 504, + 142 + ], + "type": "text", + "content": "), we apply:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 236, + 145, + 505, + 172 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 236, + 145, + 505, + 172 + ], + "spans": [ + { + "bbox": [ + 236, + 145, + 505, + 172 + ], + "type": "interline_equation", + "content": "S _ {f \\pm \\delta} \\leftarrow \\max \\left(S _ {f \\pm \\delta}, \\frac {S _ {f}}{1 + | \\delta |}\\right). 
\\tag {9}", + "image_path": "0ac1d21efd0cb56e7487c4fd721935d9ea73ceb841af753fd12a4061a0102ba1.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 175, + 506, + 231 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 175, + 506, + 231 + ], + "spans": [ + { + "bbox": [ + 104, + 175, + 506, + 231 + ], + "type": "text", + "content": "In this way, high-confidence frames raise the scores of close-by frames, reflecting temporal continuity. Following these local updates, the sampling distribution " + }, + { + "bbox": [ + 104, + 175, + 506, + 231 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 104, + 175, + 506, + 231 + ], + "type": "text", + "content": " is refined using spline interpolation, and then normalized. This iteration proceeds until either the search budget " + }, + { + "bbox": [ + 104, + 175, + 506, + 231 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 104, + 175, + 506, + 231 + ], + "type": "text", + "content": " is reached or all key objects have been successfully identified. The visualization of distribution in different iterations can be seen in Figure 3. Finally, the method outputs the top " + }, + { + "bbox": [ + 104, + 175, + 506, + 231 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 175, + 506, + 231 + ], + "type": "text", + "content": " frames according to their terminal scores." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 236, + 187, + 248 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 236, + 187, + 248 + ], + "spans": [ + { + "bbox": [ + 105, + 236, + 187, + 248 + ], + "type": "text", + "content": "3 Experiment" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 251, + 219, + 262 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 251, + 219, + 262 + ], + "spans": [ + { + "bbox": [ + 105, + 251, + 219, + 262 + ], + "type": "text", + "content": "3.1 Benchmark Datasets" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 266, + 506, + 355 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 266, + 506, + 355 + ], + "spans": [ + { + "bbox": [ + 104, + 266, + 506, + 355 + ], + "type": "text", + "content": "The proposed VSLS is systematically evaluated across four benchmark datasets: a) LONGVIDEOBENCH Ye et al. (2025a) for assessing long-context video-language comprehension capabilities; b) Video-MME Fu et al. (2024) as the first comprehensive benchmark for multimodal video analytics; c) HAYSTACK-LVBENCH, extended from LONGVIDEOBENCH with human-annotated frame index answers; and d) HAYSTACK-EGO4D, derived from EGO4D with similar annotations. While LONGVIDEOBENCH and Video-MME measure performance enhancement in QA accuracy, HAYSTACK-EGO4D and HAYSTACK-LVBENCH quantitatively evaluate keyframe selection accuracy through recall and precision metrics. Further details of datasets are provided in Appendix D." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 360, + 212, + 370 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 360, + 212, + 370 + ], + "spans": [ + { + "bbox": [ + 105, + 360, + 212, + 370 + ], + "type": "text", + "content": "3.2 Evaluation Metrics" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 375, + 296, + 387 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 375, + 296, + 387 + ], + "spans": [ + { + "bbox": [ + 105, + 375, + 296, + 387 + ], + "type": "text", + "content": "3.2.1 Evaluation Metrics for Search Utility" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 388, + 504, + 444 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 388, + 504, + 444 + ], + "spans": [ + { + "bbox": [ + 104, + 388, + 504, + 444 + ], + "type": "text", + "content": "Our assessment framework emphasizes both effectiveness and efficiency. For search effectiveness, we use three metrics to compare model-predicted keyframes with human annotations, considering both individual frames and full sets, addressing the possibility of multiple valid keyframe sets per query. For frame-level comparison, we evaluate the alignment between a predicted frame " + }, + { + "bbox": [ + 104, + 388, + 504, + 444 + ], + "type": "inline_equation", + "content": "f_{\\mathrm{pt}}" + }, + { + "bbox": [ + 104, + 388, + 504, + 444 + ], + "type": "text", + "content": " and a human-annotated frame " + }, + { + "bbox": [ + 104, + 388, + 504, + 444 + ], + "type": "inline_equation", + "content": "f_{\\mathrm{gt}}" + }, + { + "bbox": [ + 104, + 388, + 504, + 444 + ], + "type": "text", + "content": " from two perspectives:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 446, + 504, + 468 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 446, + 504, + 468 + ], + "spans": [ + { + "bbox": [ + 104, + 446, + 504, + 468 + ], + "type": "text", + "content": "Temporal coverage measures how well the predicted frames cover the ground truth frames along the time axis, which can be described as:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 211, + 471, + 505, + 514 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 211, + 471, + 505, + 514 + ], + "spans": [ + { + "bbox": [ + 211, + 471, + 505, + 514 + ], + "type": "interline_equation", + "content": "T_{\\mathrm{cover}}\\left(T_{\\mathrm{pt}}, T_{\\mathrm{gt}}\\right) = \\frac{\\sum_{i=1}^{|N_{\\mathrm{gt}}|} \\mathbb{I}\\left[\\min_{j}\\left|t_{\\mathrm{gt}}^{i} - t_{\\mathrm{pt}}^{j}\\right| \\leq \\delta\\right]}{|N_{\\mathrm{gt}}|}, \\tag{10}", + "image_path": "77ffdb555eea1232cf87a622c67c7e006cf68311bef63dd1fbc645e13a499a9d.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 521, + 504, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 521, + 504, + 568 + ], + "spans": [ + { + "bbox": [ + 104, + 521, + 504, + 568 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 521, + 504, + 568 + ], + "type": "inline_equation", + "content": "T_{\\mathrm{pt}}" + }, + { + "bbox": [ + 104, + 521, + 504, + 568 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 521, + 504, + 568 + ], + "type": "inline_equation", + "content": "T_{\\mathrm{gt}}" + }, + { + "bbox": [ + 104, + 521, + 504, + 568 + ], + "type": "text", + "content": " denote the sets of predicted and ground truth 
timestamps, respectively. Here, " + }, + { + "bbox": [ + 104, + 521, + 504, + 568 + ], + "type": "inline_equation", + "content": "|N_{\\mathrm{gt}}|" + }, + { + "bbox": [ + 104, + 521, + 504, + 568 + ], + "type": "text", + "content": " is the number of ground truth frames, " + }, + { + "bbox": [ + 104, + 521, + 504, + 568 + ], + "type": "inline_equation", + "content": "t_{\\mathrm{gt}}^i" + }, + { + "bbox": [ + 104, + 521, + 504, + 568 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 521, + 504, + 568 + ], + "type": "inline_equation", + "content": "t_{\\mathrm{pt}}^j" + }, + { + "bbox": [ + 104, + 521, + 504, + 568 + ], + "type": "text", + "content": " are the " + }, + { + "bbox": [ + 104, + 521, + 504, + 568 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 104, + 521, + 504, + 568 + ], + "type": "text", + "content": "-th ground truth and " + }, + { + "bbox": [ + 104, + 521, + 504, + 568 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 104, + 521, + 504, + 568 + ], + "type": "text", + "content": "-th predicted timestamps, respectively. " + }, + { + "bbox": [ + 104, + 521, + 504, + 568 + ], + "type": "inline_equation", + "content": "\\delta" + }, + { + "bbox": [ + 104, + 521, + 504, + 568 + ], + "type": "text", + "content": " is the temporal similarity threshold defining the maximum allowed time deviation, and " + }, + { + "bbox": [ + 104, + 521, + 504, + 568 + ], + "type": "inline_equation", + "content": "\\mathbb{I}[\\cdot]" + }, + { + "bbox": [ + 104, + 521, + 504, + 568 + ], + "type": "text", + "content": " is the indicator function, returning 1 if the condition holds and 0 otherwise." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 570, + 504, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 570, + 504, + 640 + ], + "spans": [ + { + "bbox": [ + 104, + 570, + 504, + 640 + ], + "type": "text", + "content": "Visual Similarity is measured by the Structural Similarity Index (SSIM) Brunet et al. (2012), capturing structural detail, luminance, and contrast between " + }, + { + "bbox": [ + 104, + 570, + 504, + 640 + ], + "type": "inline_equation", + "content": "f_{\\mathrm{pt}}" + }, + { + "bbox": [ + 104, + 570, + 504, + 640 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 570, + 504, + 640 + ], + "type": "inline_equation", + "content": "f_{\\mathrm{gt}}" + }, + { + "bbox": [ + 104, + 570, + 504, + 640 + ], + "type": "text", + "content": ". For set-to-set comparison, the key challenge is defining inter-set similarity. We adopt Precision " + }, + { + "bbox": [ + 104, + 570, + 504, + 640 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 104, + 570, + 504, + 640 + ], + "type": "text", + "content": " and Recall " + }, + { + "bbox": [ + 104, + 570, + 504, + 640 + ], + "type": "inline_equation", + "content": "R" + }, + { + "bbox": [ + 104, + 570, + 504, + 640 + ], + "type": "text", + "content": " as complementary metrics: Precision checks whether each predicted frame matches any reference frame, while Recall ensures that all reference frames are represented. 
Given the ground truth set " + }, + { + "bbox": [ + 104, + 570, + 504, + 640 + ], + "type": "inline_equation", + "content": "F_{\\mathrm{gt}} = f^{j}\\mathrm{gt}^{n}j = 1" + }, + { + "bbox": [ + 104, + 570, + 504, + 640 + ], + "type": "text", + "content": " and the predicted set " + }, + { + "bbox": [ + 104, + 570, + 504, + 640 + ], + "type": "inline_equation", + "content": "F_{\\mathrm{pt}} = f^{i}\\mathrm{pt}^{m}i = 1" + }, + { + "bbox": [ + 104, + 570, + 504, + 640 + ], + "type": "text", + "content": ", we define the multimodal retrieval quality metrics as follows:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 213, + 648, + 505, + 715 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 213, + 648, + 505, + 715 + ], + "spans": [ + { + "bbox": [ + 213, + 648, + 505, + 715 + ], + "type": "interline_equation", + "content": "\\left\\{ \\begin{array}{l} P \\left(F _ {\\mathrm {p t}}, F _ {\\mathrm {g t}}\\right) = \\frac {1}{\\left| F _ {\\mathrm {p t}} \\right|} \\sum_ {f _ {\\mathrm {p t}} ^ {i} \\in F _ {\\mathrm {p t}}} \\max _ {f _ {\\mathrm {g t}} ^ {j} \\in F _ {\\mathrm {g t}}} \\phi \\left(f _ {\\mathrm {p t}} ^ {i}, f _ {\\mathrm {g t}} ^ {j}\\right), \\\\ R \\left(F _ {\\mathrm {p t}}, F _ {\\mathrm {g t}}\\right) = \\frac {1}{\\left| F _ {\\mathrm {g t}} \\right|} \\sum_ {f _ {\\mathrm {g t}} ^ {j} \\in F _ {\\mathrm {g t}}} \\max _ {f _ {\\mathrm {p t}} ^ {i} \\in F _ {\\mathrm {p t}}} \\phi \\left(f _ {\\mathrm {g t}} ^ {j}, f _ {\\mathrm {p t}} ^ {i}\\right), \\end{array} \\right. \\tag {11a}", + "image_path": "b7caaad2a9fefb25b612157ad0bb1360615e5b13b8e9d761838013ee4af882fd.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 107, + 718, + 414, + 730 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 718, + 414, + 730 + ], + "spans": [ + { + "bbox": [ + 107, + 718, + 414, + 730 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 107, + 718, + 414, + 730 + ], + "type": "inline_equation", + "content": "\\phi (\\cdot ,\\cdot)" + }, + { + "bbox": [ + 107, + 718, + 414, + 730 + ], + "type": "text", + "content": " represents an extensible multimodal similarity metric function." + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 742, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 742, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 742, + 309, + 750 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 106, + 71, + 504, + 160 + ], + "blocks": [ + { + "bbox": [ + 106, + 71, + 504, + 160 + ], + "lines": [ + { + "bbox": [ + 106, + 71, + 504, + 160 + ], + "spans": [ + { + "bbox": [ + 106, + 71, + 504, + 160 + ], + "type": "table", + "html": "
MethodTraining RequiredSearch EfficiencyOverall Task Efficiency
MatchingIterationTFLOPs ↓Latency (sec) ↓Latency (sec) ↓Acc ↑
Static Frame Sampling
UNIFORM-8 Ye et al. (2025a)Training-BasedN/AN/AN/A0.23.853.7
Dense Retrieval
VIDEOAGENT Fan et al. (2024)Training-BasedCLIP-1B Radford et al. (2021)840536.530.234.949.2
T*-RETRIEVAL Ye et al. (2025b)Training-BasedYOLO-WORLD-110M840216.128.632.257.3
Temporal Search
T*-ATTENTION Ye et al. (2025b)Training-BasedN/AN/A88.913.717.359.3
T*-DETECTOR Ye et al. (2025b)Training-FreeYOLO-WORLD-110M4331.77.311.159.8
VSLS (OURS)-DETECTORTraining-FreeYOLO-WORLD-110M4933.37.811.661.5
", + "image_path": "97000cb287c16fdfdb7eb5b57ac87b488b53bdf10f7712a12a7a1d22a55787d8.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 166, + 504, + 189 + ], + "lines": [ + { + "bbox": [ + 104, + 166, + 504, + 189 + ], + "spans": [ + { + "bbox": [ + 104, + 166, + 504, + 189 + ], + "type": "text", + "content": "Table 1: Evaluation of performance metrics across the LV-HAYSTACK benchmark, presenting both search efficiency and end-to-end processing overhead (combining search and inference stages)." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 105, + 196, + 308, + 208 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 196, + 308, + 208 + ], + "spans": [ + { + "bbox": [ + 105, + 196, + 308, + 208 + ], + "type": "text", + "content": "3.2.2 Evaluation Metrics for Search efficiency" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 210, + 506, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 210, + 506, + 266 + ], + "spans": [ + { + "bbox": [ + 104, + 210, + 506, + 266 + ], + "type": "text", + "content": "Existing studies Fan et al. (2024); Park et al. (2024); Wang et al. (2024a,d); Wu and Xie (2023) have mainly concentrated on optimizing task-specific performance metrics while neglecting computational efficiency in temporal search operations. To systematically analyze this dimension, our evaluation framework incorporates two criteria: 1) FLOPs representing arithmetic operation complexity, and 2) Latency recording real-world execution duration." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 275, + 313, + 287 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 275, + 313, + 287 + ], + "spans": [ + { + "bbox": [ + 105, + 275, + 313, + 287 + ], + "type": "text", + "content": "3.3 Evaluation of Search Framework efficiency" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 291, + 506, + 390 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 291, + 506, + 390 + ], + "spans": [ + { + "bbox": [ + 104, + 291, + 506, + 390 + ], + "type": "text", + "content": "Current approaches for keyframe selection can be broadly categorized into three paradigms: statistic-based frame sampling, dense feature retrieval-based selection, and temporal search-based methods. As shown in Table 1, while uniform sampling achieves the fastest processing speed, its ignorance of frame semantics severely limits downstream task effectiveness. Although dense feature retrieval methods attain moderate accuracy improvements (57.3%), their exhaustive frame processing demands " + }, + { + "bbox": [ + 104, + 291, + 506, + 390 + ], + "type": "inline_equation", + "content": "4.2 \\times" + }, + { + "bbox": [ + 104, + 291, + 506, + 390 + ], + "type": "text", + "content": " more TFLOPs and introduces " + }, + { + "bbox": [ + 104, + 291, + 506, + 390 + ], + "type": "inline_equation", + "content": "4.5 \\times" + }, + { + "bbox": [ + 104, + 291, + 506, + 390 + ], + "type": "text", + "content": " higher latency than our temporal search approach. Crucially, our method introduces four visual semantic logic detectors during temporal search while maintaining comparable execution time to T* methods. 
This strategic design elevates downstream task accuracy to " + }, + { + "bbox": [ + 104, + 291, + 506, + 390 + ], + "type": "inline_equation", + "content": "61.5\\%" + }, + { + "bbox": [ + 104, + 291, + 506, + 390 + ], + "type": "text", + "content": ", achieving the best performance-efficiency trade-off." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 399, + 315, + 410 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 399, + 315, + 410 + ], + "spans": [ + { + "bbox": [ + 105, + 399, + 315, + 410 + ], + "type": "text", + "content": "3.4 Visual Semantic Logic Search Performance" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 415, + 506, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 415, + 506, + 502 + ], + "spans": [ + { + "bbox": [ + 104, + 415, + 506, + 502 + ], + "type": "text", + "content": "As demonstrated in Table 2, we evaluate VSLS on LONGVIDEOBENCH from two critical perspectives: visual similarity (measured by precision and recall) and temporal coverage. Our method achieves state-of-the-art performance across all metrics. Specifically, under the 32-frame setting, VSLS attains a precision of " + }, + { + "bbox": [ + 104, + 415, + 506, + 502 + ], + "type": "inline_equation", + "content": "74.5\\%" + }, + { + "bbox": [ + 104, + 415, + 506, + 502 + ], + "type": "text", + "content": " and recall of " + }, + { + "bbox": [ + 104, + 415, + 506, + 502 + ], + "type": "inline_equation", + "content": "92.5\\%" + }, + { + "bbox": [ + 104, + 415, + 506, + 502 + ], + "type": "text", + "content": ", outperforming all baselines in visual similarity. More notably, the temporal coverage of VSLS reaches " + }, + { + "bbox": [ + 104, + 415, + 506, + 502 + ], + "type": "inline_equation", + "content": "41.4\\%" + }, + { + "bbox": [ + 104, + 415, + 506, + 502 + ], + "type": "text", + "content": ", surpassing the second-best method (T* at " + }, + { + "bbox": [ + 104, + 415, + 506, + 502 + ], + "type": "inline_equation", + "content": "36.5\\%" + }, + { + "bbox": [ + 104, + 415, + 506, + 502 + ], + "type": "text", + "content": ") by " + }, + { + "bbox": [ + 104, + 415, + 506, + 502 + ], + "type": "inline_equation", + "content": "13.4\\%" + }, + { + "bbox": [ + 104, + 415, + 506, + 502 + ], + "type": "text", + "content": ", the largest margin among all comparisons. This significant improvement highlights the effectiveness of our visual semantic logic detection modules in identifying query-relevant keyframes with both semantic alignment and temporal completeness." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 507, + 306, + 585 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 507, + 306, + 585 + ], + "spans": [ + { + "bbox": [ + 104, + 507, + 306, + 585 + ], + "type": "text", + "content": "These results empirically support our core hypothesis: leveraging semantic and logical cues from text queries enables precise detection of relevant video frames. Improvements in visual similarity and temporal coverage confirm that VSLS effectively captures keyframes while preserving temporal coherence through visual-logical alignment."
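For reference, the precision and recall reported here are the set-to-set scores of Eq. (11a). Below is a minimal sketch assuming NumPy arrays as frames; `set_precision_recall` is our own name, and `phi_mae` is a crude stand-in similarity. For the paper's metric, plug in SSIM (Brunet et al., 2012), e.g. `skimage.metrics.structural_similarity`, as φ.

```python
import numpy as np

def set_precision_recall(F_pt, F_gt, phi):
    """Eq. (11a): each predicted frame is scored against its best match in
    the ground-truth set (precision), and vice versa (recall)."""
    precision = np.mean([max(phi(fp, fg) for fg in F_gt) for fp in F_pt])
    recall = np.mean([max(phi(fg, fp) for fp in F_pt) for fg in F_gt])
    return float(precision), float(recall)

def phi_mae(a, b):
    # Illustrative [0, 1] similarity from mean absolute pixel error;
    # swap in SSIM here to reproduce the paper's visual-similarity metric.
    return 1.0 - np.abs(a.astype(float) - b.astype(float)).mean() / 255.0

# Toy frame sets: two predictions coincide with ground truth, one is random.
rng = np.random.default_rng(0)
F_gt = [rng.integers(0, 256, (32, 32), dtype=np.uint8) for _ in range(3)]
F_pt = F_gt[:2] + [rng.integers(0, 256, (32, 32), dtype=np.uint8)]
print(set_precision_recall(F_pt, F_gt, phi_mae))  # prints (precision, recall)
```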
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 594, + 288, + 605 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 594, + 288, + 605 + ], + "spans": [ + { + "bbox": [ + 105, + 594, + 288, + 605 + ], + "type": "text", + "content": "3.5 Downstream Video QA Performance" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 609, + 305, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 609, + 305, + 718 + ], + "spans": [ + { + "bbox": [ + 104, + 609, + 305, + 718 + ], + "type": "text", + "content": "To demonstrate the advantages of VSLS, we evaluate downstream video QA performance on LONGVIDEOBENCH and VIDEO-MME. As shown in Table 3, videos are grouped by length into Short, Medium, and Long (durations span 15 s to 3600 s on LONGVIDEOBENCH and up to 60 min on VIDEO-MME). VSLS consistently achieves the highest accuracy in the long-video category across different frame counts and QA models. Compared to the baseline T*, incorporating our visual semantic logic relations (Figure 1) yields substantial gains." + } + ] + } + ], + "index": 10 + }, + { + "type": "table", + "bbox": [ + 312, + 570, + 506, + 707 + ], + "blocks": [ + { + "bbox": [ + 310, + 510, + 504, + 565 + ], + "lines": [ + { + "bbox": [ + 310, + 510, + 504, + 565 + ], + "spans": [ + { + "bbox": [ + 310, + 510, + 504, + 565 + ], + "type": "text", + "content": "Table 2: Search utility results on LONGVIDEOBENCH. Best scores in the 8-frame setting are underlined, and in the 32-frame setting are bold. Gray indicates results from the original paper." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 312, + 570, + 506, + 707 + ], + "lines": [ + { + "bbox": [ + 312, + 570, + 506, + 707 + ], + "spans": [ + { + "bbox": [ + 312, + 570, + 506, + 707 + ], + "type": "table", + "html": "
MethodFrameLONGVIDEOBENCH
Precision ↑Recall ↑Temporal Coverage ↑
Static Frame Sampling Method
UNIFORM Ye et al. (2025a)856.072.06.3
UNIFORM860.780.44.7
UNIFORM3258.781.624.9
UNIFORM3260.285.08.1
Dense Retrieval Method
VIDEOAGENT Fan et al. (2024)10.158.873.28.5
RETRIEVAL-BASED Ye et al. (2025b)863.165.56.3
RETRIEVAL-BASED3259.980.821.8
Temporal Searching Method
T* Ye et al. (2025b)858.472.77.1
T*875.388.226.2
VSLS (ours)875.688.626.3
T*3258.383.228.2
T*3274.090.336.5
VSLS (ours)3274.592.541.4
", + "image_path": "9418dbcf790187e3dd3a1499947366d89992e9107a4f5b6b4e378bd6721a2cc2.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 105, + 719, + 498, + 731 + ], + "lines": [ + { + "bbox": [ + 105, + 719, + 498, + 731 + ], + "spans": [ + { + "bbox": [ + 105, + 719, + 498, + 731 + ], + "type": "text", + "content": "These results confirm that modeling visual-logical relations is key to effective QA on long videos." + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 106, + 71, + 504, + 213 + ], + "blocks": [ + { + "bbox": [ + 106, + 71, + 504, + 213 + ], + "lines": [ + { + "bbox": [ + 106, + 71, + 504, + 213 + ], + "spans": [ + { + "bbox": [ + 106, + 71, + 504, + 213 + ], + "type": "table", + "html": "
LONGVIDEOBENCHVIDEO-MME
Model and SizeFrameVideo LengthModel and SizeFrameVideo Length
Long 900-3600sMedium 180-600sShort 15-60sLong 30-60minMedium 4-15min
GPT-4o Hurst et al. (2024)847.149.467.3GPT-4o855.260.2
GPT-4o + T*849.156.268.0GPT-4o + T*855.261.2
GPT-4o + VSLS (ours)851.258.974.0GPT-4o + VSLS (ours)856.960.7
INTERNVL 2.5-78B Chen et al. (2024d)855.757.374.0INTERNVL 2.5-78B852.655.5
INTERNVL 2.5-78B + VSLS (ours)858.061.574.0INTERNVL 2.5-78B + VSLS (ours)857.757.5
GPT-4o3253.856.574.0GPT-4o3255.261.0
GPT-4o + T*3255.358.872.0GPT-4o + T*3255.261.6
GPT-4o + VSLS (ours)3254.260.076.0GPT-4o + VSLS (ours)3255.261.9
LLAVA-ONEVISION-QWEN2-78B-OV3259.363.977.4LLaVA-OneVision-78B3260.062.2
PLLAVA-34B3249.150.866.8VIDEOLLAMA 23257.659.9
LLAVA-VIDEO-78B-QWEN212859.363.977.4ORYX-1.512859.365.3
MPLUG-OWL3-7B12853.958.873.7ARIA-8x3.5B25658.867.0
GPT-4o (0513)25661.666.776.8GEMINI-1.5-Pro (0615)1/0.5 fps67.474.3
", + "image_path": "3269a0ba4ba415d7370c101f6a7dce166285be0b34ece8282452072c7694fb9a.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 218, + 504, + 264 + ], + "lines": [ + { + "bbox": [ + 104, + 218, + 504, + 264 + ], + "spans": [ + { + "bbox": [ + 104, + 218, + 504, + 264 + ], + "type": "text", + "content": "Table 3: Downstream task evaluation results on two benchmarks. All accuracy scores (\\%) in black are from our replication. We also cite the reported accuracy of SOTA models in gray (noting that their settings may differ and results may not be reproducible), along with their number of frames used for QA inference, for full transparency." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 105, + 272, + 170, + 285 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 272, + 170, + 285 + ], + "spans": [ + { + "bbox": [ + 105, + 272, + 170, + 285 + ], + "type": "text", + "content": "4 Analysis" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 293, + 339, + 304 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 293, + 339, + 304 + ], + "spans": [ + { + "bbox": [ + 104, + 293, + 339, + 304 + ], + "type": "text", + "content": "4.1 Coverage Analysis of Semantic-Logical Relations" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 313, + 506, + 390 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 313, + 506, + 390 + ], + "spans": [ + { + "bbox": [ + 104, + 313, + 506, + 390 + ], + "type": "text", + "content": "To ascertain the practical applicability and coverage of our defined semantic-logical relations (spatial, temporal, attribute, and causal), we conducted an analysis of their detection across all queries in the LongVideoBench and VideoMME datasets. Our findings reveal a crucial insight: for every question posed within these extensive VQA benchmarks, our query analysis module successfully identified and mapped the query to at least one of the four defined logical relation types. This empirical result supports the completeness of our proposed relation set for interpreting the semantic and logical intent inherent in these VQA tasks." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 400, + 205, + 412 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 400, + 205, + 412 + ], + "spans": [ + { + "bbox": [ + 105, + 400, + 205, + 412 + ], + "type": "text", + "content": "4.2 Time Complexity" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 416, + 306, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 416, + 306, + 536 + ], + "spans": [ + { + "bbox": [ + 104, + 416, + 306, + 536 + ], + "type": "text", + "content": "The proposed framework consists of two stages. First, VLMs such as LLAVA-7B and GPT-40 extract a semantic set " + }, + { + "bbox": [ + 104, + 416, + 306, + 536 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 104, + 416, + 306, + 536 + ], + "type": "text", + "content": " from a video " + }, + { + "bbox": [ + 104, + 416, + 306, + 536 + ], + "type": "inline_equation", + "content": "V" + }, + { + "bbox": [ + 104, + 416, + 306, + 536 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 104, + 416, + 306, + 536 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 104, + 416, + 306, + 536 + ], + "type": "text", + "content": " frames. 
" + }, + { + "bbox": [ + 104, + 416, + 306, + 536 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 104, + 416, + 306, + 536 + ], + "type": "text", + "content": " includes target objects, cue objects, and their relations, with their size constrained by prompt design. In the second stage, keyframe identification is performed via a heuristic search: " + }, + { + "bbox": [ + 104, + 416, + 306, + 536 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 416, + 306, + 536 + ], + "type": "text", + "content": " candidates are iteratively selected using a scoring function " + }, + { + "bbox": [ + 104, + 416, + 306, + 536 + ], + "type": "inline_equation", + "content": "h(\\cdot ,S)" + }, + { + "bbox": [ + 104, + 416, + 306, + 536 + ], + "type": "text", + "content": ". The score distribution scores " + }, + { + "bbox": [ + 104, + 416, + 306, + 536 + ], + "type": "inline_equation", + "content": "[n]" + }, + { + "bbox": [ + 104, + 416, + 306, + 536 + ], + "type": "text", + "content": " is dynamically refined using outputs from the YOLO-WORLD detector." + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 313, + 420, + 503, + 535 + ], + "blocks": [ + { + "bbox": [ + 313, + 420, + 503, + 535 + ], + "lines": [ + { + "bbox": [ + 313, + 420, + 503, + 535 + ], + "spans": [ + { + "bbox": [ + 313, + 420, + 503, + 535 + ], + "type": "image", + "image_path": "70069406c3ee25f7805367e06c6d48170b97455f4be6514bb40965b70097b46f.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 310, + 543, + 506, + 610 + ], + "lines": [ + { + "bbox": [ + 310, + 543, + 506, + 610 + ], + "spans": [ + { + "bbox": [ + 310, + 543, + 506, + 610 + ], + "type": "text", + "content": "Figure 4: Average occurrences of detected semantic-logical relation types per question on the VideoMME and LongVideoBench datasets. Spatial relations are the most frequently identified, while all queries in both datasets triggered at least one of the four relation types." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 541, + 306, + 618 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 541, + 306, + 618 + ], + "spans": [ + { + "bbox": [ + 104, + 541, + 306, + 618 + ], + "type": "text", + "content": "Our analysis focuses on YOLO-WORLD detections, the main computational bottleneck due to their reliance on deep neural networks. Reducing the number of detections improves efficiency without sacrificing accuracy. At each iteration, the detector processes " + }, + { + "bbox": [ + 104, + 541, + 306, + 618 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 541, + 306, + 618 + ], + "type": "text", + "content": " selected frames to match objects and relations in " + }, + { + "bbox": [ + 104, + 541, + 306, + 618 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 104, + 541, + 306, + 618 + ], + "type": "text", + "content": ", yielding " + }, + { + "bbox": [ + 104, + 541, + 306, + 618 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 541, + 306, + 618 + ], + "type": "text", + "content": " detections. 
The" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 617, + 506, + 674 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 617, + 506, + 674 + ], + "spans": [ + { + "bbox": [ + 104, + 617, + 506, + 674 + ], + "type": "text", + "content": "search stops when all targets are found or the iteration budget " + }, + { + "bbox": [ + 104, + 617, + 506, + 674 + ], + "type": "inline_equation", + "content": "\\min(1000, 0.1 \\times V_t)" + }, + { + "bbox": [ + 104, + 617, + 506, + 674 + ], + "type": "text", + "content": " (with " + }, + { + "bbox": [ + 104, + 617, + 506, + 674 + ], + "type": "inline_equation", + "content": "V_t" + }, + { + "bbox": [ + 104, + 617, + 506, + 674 + ], + "type": "text", + "content": " as the video duration in seconds) is exhausted. In the worst case (e.g., videos with " + }, + { + "bbox": [ + 104, + 617, + 506, + 674 + ], + "type": "inline_equation", + "content": ">10,000" + }, + { + "bbox": [ + 104, + 617, + 506, + 674 + ], + "type": "text", + "content": " frames and no matches), the cap is 1,000 iterations. Ideally, the evaluation function " + }, + { + "bbox": [ + 104, + 617, + 506, + 674 + ], + "type": "inline_equation", + "content": "h(\\cdot, S)" + }, + { + "bbox": [ + 104, + 617, + 506, + 674 + ], + "type": "text", + "content": " assigns high confidence to target frames, making the algorithm resemble top-" + }, + { + "bbox": [ + 104, + 617, + 506, + 674 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 617, + 506, + 674 + ], + "type": "text", + "content": " selection over " + }, + { + "bbox": [ + 104, + 617, + 506, + 674 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 104, + 617, + 506, + 674 + ], + "type": "text", + "content": " candidates in " + }, + { + "bbox": [ + 104, + 617, + 506, + 674 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(|S| \\log n)" + }, + { + "bbox": [ + 104, + 617, + 506, + 674 + ], + "type": "text", + "content": " iterations Ye et al. (2025b), resulting in an average of " + }, + { + "bbox": [ + 104, + 617, + 506, + 674 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(|S| k \\log n)" + }, + { + "bbox": [ + 104, + 617, + 506, + 674 + ], + "type": "text", + "content": " YOLO-WORLD inferences." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 677, + 507, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 507, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 507, + 723 + ], + "type": "text", + "content": "Experimental results also demonstrate that integrating relational information into the search algorithm incurs negligible computational overhead compared to the baseline T* approach. On the LV-HAYSTACK benchmark, the average iteration count increases from 42.94 (T*) to 48.82 iterations, representing a modest " + }, + { + "bbox": [ + 104, + 677, + 507, + 723 + ], + "type": "inline_equation", + "content": "13.69\\%" + }, + { + "bbox": [ + 104, + 677, + 507, + 723 + ], + "type": "text", + "content": " rise in the time cost." 
+ } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 72, + 271, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 72, + 271, + 83 + ], + "spans": [ + { + "bbox": [ + 105, + 72, + 271, + 83 + ], + "type": "text", + "content": "4.3 Ablation Study of Four Relations" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 94, + 282, + 247 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 94, + 282, + 247 + ], + "spans": [ + { + "bbox": [ + 104, + 94, + 282, + 247 + ], + "type": "text", + "content": "Figure 4 illustrates the distribution of four logic relation types across LONGVIDEOBENCH and VIDEO-MME datasets, where spatial relations predominate, followed by attribute relations. In Table 4, we extract samples containing different relation types from LONGVIDEOBENCH to compare the object detection-based T* method with our VSLS approach. Experimental results demonstrate that VSLS achieves significant improvements across both image similarity metrics (SSIM Precision and SSIM Recall). Additionally, temporal coverage shows marked enhance" + } + ] + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 292, + 78, + 501, + 193 + ], + "blocks": [ + { + "bbox": [ + 292, + 78, + 501, + 193 + ], + "lines": [ + { + "bbox": [ + 292, + 78, + 501, + 193 + ], + "spans": [ + { + "bbox": [ + 292, + 78, + 501, + 193 + ], + "type": "table", + "html": "
Logic TypeMethodLONGVIDEOBENCH
Precision ↑Recall ↑TC ↑
SpatialT*72.988.737.5
VSLS (ours)73.691.445.5
AttributeT*71.887.638.5
VSLS (ours)72.790.942.1
TimeT*76.789.237.3
VSLS (ours)77.592.536.1
CausalT*74.792.438.6
VSLS (ours)74.793.839.6
", + "image_path": "cb19cfb9dbee0c99a276e2b549faec542655bf11e4318afd9cb3e3b30c96200d.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 286, + 198, + 506, + 243 + ], + "lines": [ + { + "bbox": [ + 286, + 198, + 506, + 243 + ], + "spans": [ + { + "bbox": [ + 286, + 198, + 506, + 243 + ], + "type": "text", + "content": "Table 4: Comparison of our method (VSLS) with the baseline across four logic relation types on LONGVIDEOBENCH. Precision: SSIM Precision; Recall: SSIM Recall; TC: Temporal Coverage." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 247, + 504, + 313 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 247, + 504, + 313 + ], + "spans": [ + { + "bbox": [ + 104, + 247, + 504, + 313 + ], + "type": "text", + "content": "ment for attribute, spatial, and causal relations, with spatial relations exhibiting the most substantial improvement (21.3% increase over T*). For the time relation category, we observe a slight decrease in temporal coverage, which may be attributed to the relative scarcity of time relation samples in the dataset, limiting the opportunity to demonstrate the advantages of VSLS. Nevertheless, Figure 1 provides visual evidence of how effectively leveraging time relations can facilitate downstream question-answering tasks." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 326, + 197, + 338 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 326, + 197, + 338 + ], + "spans": [ + { + "bbox": [ + 105, + 326, + 197, + 338 + ], + "type": "text", + "content": "5 Related Work" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 350, + 506, + 450 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 350, + 506, + 450 + ], + "spans": [ + { + "bbox": [ + 104, + 350, + 506, + 450 + ], + "type": "text", + "content": "Challenges in Long Video Understanding: Long video understanding is inherently more challenging than short-video or image-based tasks due to its rich temporal dynamics and massive redundancy Qian et al. (2024); Zeng et al. (2024); Yu et al. (2019). The large number of frames increases both memory and computational requirements, making straightforward dense sampling infeasible. Moreover, crucial events may span distant timestamps, demanding high-capacity models to capture long-range dependencies Ranasinghe et al. (2025); Shi et al. (2024); Chen et al. (2024b); Weng et al. (2024). Meanwhile, the diverse and continuous visual content raises noise and distractors; thus, strategies to effectively locate or distill essential parts of the video are of primary importance Zhang et al. (2023); Cheng et al. (2024b); Xu et al. (2023); Ye et al. (2025b)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 454, + 506, + 575 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 454, + 506, + 575 + ], + "spans": [ + { + "bbox": [ + 104, + 454, + 506, + 575 + ], + "type": "text", + "content": "Existing Solutions based on VLMs typically share three core ideas: 1) video sampling or retrieval for efficiency, 2) multi-stage or interactive reasoning to handle complex questions, and 3) compact representation to accommodate the VLM's limited context window. For instance, retrieval-based pipelines partition a video into segments and employ a learned or rule-based retriever to identify the relevant chunks before passing them to a VLM Pan et al. (2023); Choudhury et al. 
(2023, 2025). Other lines of research compress each frame into minimal tokens to reduce computational overhead Li et al. (2024); Chen et al. (2024a); Song et al. (2024), or adopt a streaming mechanism to propagate memory representations along the temporal axis Qian et al. (2024); Wu et al. (2022); Liu et al. (2024). Beyond these efficiency-oriented approaches, LLM/VLM-as-planner frameworks factorize the process into a series of perception queries, enabling an agent to fetch additional frame-level details if needed Wang et al. (2024b); Zhang et al. (2024); Liao et al. (2024)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 588, + 185, + 601 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 588, + 185, + 601 + ], + "spans": [ + { + "bbox": [ + 105, + 588, + 185, + 601 + ], + "type": "text", + "content": "6 Conclusion" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 612, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 612, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 612, + 506, + 723 + ], + "type": "text", + "content": "In this paper, we present Visual Semantic-Logical Search (VSLS), a novel framework that efficiently selects semantically relevant keyframes for long video understanding by decomposing logical relationships between textual queries and visual elements. VSLS, built on four defined logical dependencies (spatial co-occurrence, temporal proximity, attribute dependency, and causal order), significantly outperforms existing methods while sampling only " + }, + { + "bbox": [ + 104, + 612, + 506, + 723 + ], + "type": "inline_equation", + "content": "1.4\\%" + }, + { + "bbox": [ + 104, + 612, + 506, + 723 + ], + "type": "text", + "content": " of video frames. The " + }, + { + "bbox": [ + 104, + 612, + 506, + 723 + ], + "type": "inline_equation", + "content": "8.7\\%" + }, + { + "bbox": [ + 104, + 612, + 506, + 723 + ], + "type": "text", + "content": " improvement in GPT-4o's long video QA accuracy demonstrates that query-guided visual semantic logic search effectively bridges the gap between textual queries and visual content. VSLS's plug-and-play nature enables seamless integration with existing pipelines, making it practical for real-world applications. Future work could consider additional logical relations, learnable search methods, enhanced interpretability, and further downstream tasks." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 71, + 165, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 71, + 165, + 84 + ], + "spans": [ + { + "bbox": [ + 106, + 71, + 165, + 84 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 89, + 506, + 723 + ], + "type": "list", + "angle": 0, + "index": 21, + "blocks": [ + { + "bbox": [ + 105, + 89, + 505, + 110 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 89, + 505, + 110 + ], + "spans": [ + { + "bbox": [ + 105, + 89, + 505, + 110 + ], + "type": "text", + "content": "Dominique Brunet, Edward R. Vrscay, and Zhou Wang. 
On the mathematical properties of the structural similarity index. IEEE Transactions on Image Processing, 2012." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 116, + 506, + 137 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 116, + 506, + 137 + ], + "spans": [ + { + "bbox": [ + 105, + 116, + 506, + 137 + ], + "type": "text", + "content": "Jieneng Chen, Luoxin Ye, Ju He, Zhao-Yang Wang, Daniel Khashabi, and Alan Yuille. Llavolta: Efficient multi-modal models via stage-wise visual context compression. In arXiv preprint arXiv:2406.20092, 2024a." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 144, + 506, + 175 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 144, + 506, + 175 + ], + "spans": [ + { + "bbox": [ + 106, + 144, + 506, + 175 + ], + "type": "text", + "content": "Jr-Jen Chen, Yu-Chien Liao, Hsi-Che Lin, Yu-Chu Yu, Yen-Chun Chen, and Yu-Chiang Frank Wang. ReXTime: A benchmark suite for reasoning-across-time in videos. In The Thirty-eight Conference on Neural Information Processing Systems Datasets and Benchmarks Track, 2024b." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 181, + 506, + 212 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 181, + 506, + 212 + ], + "spans": [ + { + "bbox": [ + 106, + 181, + 506, + 212 + ], + "type": "text", + "content": "Lin Chen, Xin Wei, Jinsong Li, Xiaoyi Dong, Pan Zhang, Yuhang Zang, Zehui Chen, Haodong Duan, Zhenyu Tang, Li Yuan, et al. Sharegpt4video: Improving video understanding and generation with better captions. NeurIPS, 37:19472-19495, 2024c." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 219, + 504, + 258 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 219, + 504, + 258 + ], + "spans": [ + { + "bbox": [ + 105, + 219, + 504, + 258 + ], + "type": "text", + "content": "Zhe Chen, Jiannan Wu, Wenhai Wang, Weijie Su, Guo Chen, Sen Xing, Muyan Zhong, Qinglong Zhang, Xizhou Zhu, Lewei Lu, et al. Internvl: Scaling up vision foundation models and aligning for generic visual-linguistic tasks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 24185–24198, 2024d." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 266, + 504, + 287 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 266, + 504, + 287 + ], + "spans": [ + { + "bbox": [ + 105, + 266, + 504, + 287 + ], + "type": "text", + "content": "Tianheng Cheng, Lin Song, Yixiao Ge, Wenyu Liu, Xinggang Wang, and Ying Shan. Yolo-world: Real-time open-vocabulary object detection. CVPR, 2024a." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 293, + 505, + 324 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 293, + 505, + 324 + ], + "spans": [ + { + "bbox": [ + 105, + 293, + 505, + 324 + ], + "type": "text", + "content": "Zesen Cheng, Sicong Leng, Hang Zhang, Yifei Xin, Xin Li, Guanzheng Chen, Yongxin Zhu, Wenqi Zhang, Ziyang Luo, Deli Zhao, and Lidong Bing. Videollama 2: Advancing spatial-temporal modeling and audio understanding in video-llms, 2024b." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 331, + 504, + 353 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 331, + 504, + 353 + ], + "spans": [ + { + "bbox": [ + 105, + 331, + 504, + 353 + ], + "type": "text", + "content": "Rohan Choudhury, Koichiro Niinuma, Kris M Kitani, and Laszlo A Jeni. 
Zero-shot video question answering with procedural programs. arXiv preprint arXiv:2312.00937, 2023." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 358, + 504, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 358, + 504, + 380 + ], + "spans": [ + { + "bbox": [ + 105, + 358, + 504, + 380 + ], + "type": "text", + "content": "Rohan Choudhury, Koichiro Niinuma, Kris M. Kitani, and László A. Jeni. Video question answering with procedural programs. In ECCV, 2025." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 386, + 293, + 397 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 386, + 293, + 397 + ], + "spans": [ + { + "bbox": [ + 105, + 386, + 293, + 397 + ], + "type": "text", + "content": "David Cohen. Universals in linguistic theory, 1968." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 403, + 504, + 425 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 403, + 504, + 425 + ], + "spans": [ + { + "bbox": [ + 105, + 403, + 504, + 425 + ], + "type": "text", + "content": "Yue Fan, Xiaojian Ma, Rujie Wu, Yuntao Du, Jiaqi Li, Zhi Gao, and Qing Li. Videoagent: A memory-augmented multimodal agent for video understanding. ArXiv, abs/2403.11481, 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 431, + 473, + 443 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 431, + 473, + 443 + ], + "spans": [ + { + "bbox": [ + 105, + 431, + 473, + 443 + ], + "type": "text", + "content": "Charles J Fillmore. The case for case. Bach and Harms (Ed.): Universals in Linguistic Theory, 1967." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 449, + 505, + 489 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 449, + 505, + 489 + ], + "spans": [ + { + "bbox": [ + 105, + 449, + 505, + 489 + ], + "type": "text", + "content": "Chaoyou Fu, Yuhan Dai, Yondong Luo, Lei Li, Shuhuai Ren, Renrui Zhang, Zihan Wang, Chenyu Zhou, Yunhang Shen, Mengdan Zhang, Peixian Chen, Yanwei Li, Shaohui Lin, Sirui Zhao, Ke Li, Tong Xu, Xiawu Zheng, Enhong Chen, Rongrong Ji, and Xing Sun. Video-mme: The first-ever comprehensive evaluation benchmark of multi-modal llms in video analysis. ArXiv, abs/2405.21075, 2024." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 496, + 504, + 536 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 496, + 504, + 536 + ], + "spans": [ + { + "bbox": [ + 105, + 496, + 504, + 536 + ], + "type": "text", + "content": "Kristen Grauman, Andrew Westbury, Eugene Byrne, Zachary Chavis, Antonino Furnari, Rohit Girdhar, Jackson Hamburger, Hao Jiang, Miao Liu, Xingyu Liu, et al. Ego4d: Around the world in 3,000 hours of egocentric video. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 18995-19012, 2022." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 544, + 504, + 565 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 544, + 504, + 565 + ], + "spans": [ + { + "bbox": [ + 105, + 544, + 504, + 565 + ], + "type": "text", + "content": "Aaron Hurst, Adam Lerer, Adam P Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihinda, Alan Hayes, Alec Radford, et al. Gpt-4o system card. arXiv preprint arXiv:2410.21276, 2024." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 571, + 504, + 592 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 571, + 504, + 592 + ], + "spans": [ + { + "bbox": [ + 105, + 571, + 504, + 592 + ], + "type": "text", + "content": "Minkuk Kim, Hyeon Bae Kim, Jinyoung Moon, Jinwoo Choi, and Seong Tae Kim. Do you remember? dense video captioning with cross-modal memory retrieval. In CVPR, 2024." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 598, + 504, + 619 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 598, + 504, + 619 + ], + "spans": [ + { + "bbox": [ + 105, + 598, + 504, + 619 + ], + "type": "text", + "content": "Yanwei Li, Chengyao Wang, and Jiaya Jia. Llama-vid: An image is worth 2 tokens in large language models. In ECCV, 2024." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 626, + 504, + 647 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 626, + 504, + 647 + ], + "spans": [ + { + "bbox": [ + 105, + 626, + 504, + 647 + ], + "type": "text", + "content": "Jianxin Liang, Xiaojun Meng, Yueqian Wang, Chang Liu, Qun Liu, and Dongyan Zhao. End-to-end video question answering with frame scoring mechanisms and adaptive sampling. ArXiv, abs/2407.15047, 2024." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 654, + 504, + 685 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 654, + 504, + 685 + ], + "spans": [ + { + "bbox": [ + 105, + 654, + 504, + 685 + ], + "type": "text", + "content": "Ruotong Liao, Max Eler, Huiyu Wang, Guangyao Zhai, Gengyuan Zhang, Yunpu Ma, and Volker Tresp. Videoinsta: Zero-shot long video understanding via informative spatial-temporal reasoning with llms. In EMNLP Findings, 2024." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 691, + 504, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 691, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 105, + 691, + 504, + 723 + ], + "type": "text", + "content": "Shilong Liu, Hao Cheng, Haotian Liu, Hao Zhang, Feng Li, Tianhe Ren, Xueyan Zou, Jianwei Yang, Hang Su, Jun Zhu, et al. Llava-plus: Learning to use tools for creating multimodal agents. In European Conference on Computer Vision, pages 126-142. Springer, 2024." + } + ] + } + ], + "index": 20 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 72, + 506, + 721 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 505, + 95 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 505, + 95 + ], + "type": "text", + "content": "William C Mann and Sandra A Thompson. Rhetorical structure theory: Toward a functional theory of text organization. Text-interdisciplinary Journal for the Study of Discourse, 8(3):243-281, 1988." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 101, + 504, + 122 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 101, + 504, + 122 + ], + "spans": [ + { + "bbox": [ + 106, + 101, + 504, + 122 + ], + "type": "text", + "content": "Leonard Gerson Neuberg. Causality: models, reasoning, and inference, by Judea Pearl, Cambridge University Press, 2000. Econometric Theory, 19(4):675-685, 2003." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 129, + 506, + 159 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 129, + 506, + 159 + ], + "spans": [ + { + "bbox": [ + 107, + 129, + 506, + 159 + ], + "type": "text", + "content": "Junting Pan, Ziyi Lin, Yuying Ge, Xiatian Zhu, Renrui Zhang, Yi Wang, Yu Qiao, and Hongsheng Li. Retrieving-to-answer: Zero-shot video question answering with frozen large language models. In ICCV Workshops, 2023." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 167, + 506, + 198 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 167, + 506, + 198 + ], + "spans": [ + { + "bbox": [ + 106, + 167, + 506, + 198 + ], + "type": "text", + "content": "Jong Sung Park, Kanchana Ranasinghe, Kumara Kahatapitiya, Wonjeong Ryoo, Donghyun Kim, and Michael S. Ryoo. Too many frames, not all useful: Efficient strategies for long-form video qa. ArXiv, abs/2406.09396, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 206, + 504, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 206, + 504, + 228 + ], + "spans": [ + { + "bbox": [ + 107, + 206, + 504, + 228 + ], + "type": "text", + "content": "Rui Qian, Xiaoyi Dong, Pan Zhang, Yuhang Zang, Shuangrui Ding, Dahua Lin, and Jiaqi Wang. Streaming long video understanding with large language models. In NeurIPS, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 235, + 505, + 266 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 235, + 505, + 266 + ], + "spans": [ + { + "bbox": [ + 107, + 235, + 505, + 266 + ], + "type": "text", + "content": "Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 273, + 504, + 294 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 273, + 504, + 294 + ], + "spans": [ + { + "bbox": [ + 107, + 273, + 504, + 294 + ], + "type": "text", + "content": "Manjusha Rajan and Latha Parameswaran. Key frame extraction algorithm for surveillance videos using an evolutionary approach. Scientific Reports, 15(1):536, 2025." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 301, + 504, + 322 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 301, + 504, + 322 + ], + "spans": [ + { + "bbox": [ + 107, + 301, + 504, + 322 + ], + "type": "text", + "content": "Kanchana Ranasinghe, Xiang Li, Kumara Kahatapitiya, and Michael S Ryoo. Understanding long videos with multimodal language models. In ICLR, 2025."
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 329, + 505, + 350 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 329, + 505, + 350 + ], + "spans": [ + { + "bbox": [ + 107, + 329, + 505, + 350 + ], + "type": "text", + "content": "Yudi Shi, Shangzhe Di, Qirui Chen, and Weidi Xie. Unlocking video-llm via agent-of-thoughts distillation. arXiv preprint arXiv:2412.01694, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 357, + 506, + 388 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 357, + 506, + 388 + ], + "spans": [ + { + "bbox": [ + 106, + 357, + 506, + 388 + ], + "type": "text", + "content": "Dingjie Song, Wenjun Wang, Shunian Chen, Xidong Wang, Michael Guan, and Benyou Wang. Less is more: A simple yet effective token reduction method for efficient multi-modal llms. arXiv preprint arXiv:2409.10994, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 396, + 504, + 417 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 396, + 504, + 417 + ], + "spans": [ + { + "bbox": [ + 106, + 396, + 504, + 417 + ], + "type": "text", + "content": "John F. Sowa. Knowledge Representation: Logical, Philosophical, and Computational Foundations. Brooks/Cole Publishing Co., Pacific Grove, CA, USA, 2000." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 424, + 504, + 445 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 424, + 504, + 445 + ], + "spans": [ + { + "bbox": [ + 107, + 424, + 504, + 445 + ], + "type": "text", + "content": "Leonard Talmy. Toward a Cognitive Semantics (Volume 1: Concept Structuring Systems; Volume 2: Typology and Process in Concept Structuring). MIT Press, Cambridge, MA, USA, 2000." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 107, + 453, + 505, + 474 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 453, + 505, + 474 + ], + "spans": [ + { + "bbox": [ + 107, + 453, + 505, + 474 + ], + "type": "text", + "content": "Reuben Tan, Xineng Sun, Ping Hu, Jui hsien Wang, Hanieh Deilamsalehy, Bryan A. Plummer, Bryan Russell, and Kate Saenko. Koala: Key frame-conditioned long video-llm. CVPR, 2024." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 107, + 481, + 505, + 511 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 481, + 505, + 511 + ], + "spans": [ + { + "bbox": [ + 107, + 481, + 505, + 511 + ], + "type": "text", + "content": "Yunlong Tang, Jing Bi, Siting Xu, Luchuan Song, Susan Liang, Teng Wang, Daoan Zhang, Jie An, Jingyang Lin, Rongyi Zhu, et al. Video understanding with large language models: A survey. arXiv preprint arXiv:2312.17432, 2023." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 106, + 519, + 504, + 550 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 519, + 504, + 550 + ], + "spans": [ + { + "bbox": [ + 106, + 519, + 504, + 550 + ], + "type": "text", + "content": "Hengyi Wang, Haizhou Shi, Shiwei Tan, Weiyi Qin, Wenyuan Wang, Tunyu Zhang, Akshay Nambi, Tanuja Ganu, and Hao Wang. Multimodal needle in a haystack: Benchmarking long-context capability of multimodal large language models, 2025." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 107, + 557, + 504, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 557, + 504, + 578 + ], + "spans": [ + { + "bbox": [ + 107, + 557, + 504, + 578 + ], + "type": "text", + "content": "Xiaohan Wang, Yuhui Zhang, Orr Zohar, and Serena Yeung-Levy. Videoagent: Long-form video understanding with large language model as agent. In ECCV, 2024a." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 107, + 586, + 504, + 607 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 586, + 504, + 607 + ], + "spans": [ + { + "bbox": [ + 107, + 586, + 504, + 607 + ], + "type": "text", + "content": "Xiaohan Wang, Yuhui Zhang, Orr Zohar, and Serena Yeung-Levy. Videoagent: Long-form video understanding with large language model as agent. In ECCV, pages 58-76. Springer, 2024b." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 106, + 614, + 506, + 646 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 614, + 506, + 646 + ], + "spans": [ + { + "bbox": [ + 106, + 614, + 506, + 646 + ], + "type": "text", + "content": "Zhanyu Wang, Longyue Wang, Zhen Zhao, Minghao Wu, Chenyang Lyu, Huayang Li, Deng Cai, Luping Zhou, Shuming Shi, and Zhaopeng Tu. Gpt4video: A unified multimodal large language model for Instruction-followed understanding and safety-aware generation. In ACM MM, pages 3907-3916, 2024c." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 106, + 653, + 506, + 683 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 653, + 506, + 683 + ], + "spans": [ + { + "bbox": [ + 106, + 653, + 506, + 683 + ], + "type": "text", + "content": "Ziyang Wang, Shoubin Yu, Elias Stengel-Eskin, Jaehong Yoon, Feng Cheng, Gedas Bertasius, and Mohit Bansal. Videotree: Adaptive tree-based video representation for llm reasoning on long videos. ArXiv, abs/2405.19209, 2024d." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 106, + 691, + 504, + 721 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 691, + 504, + 721 + ], + "spans": [ + { + "bbox": [ + 106, + 691, + 504, + 721 + ], + "type": "text", + "content": "Ziyang Wang, Shoubin Yu, Elias Stengel-Eskin, Jaehong Yoon, Feng Cheng, Gedas Bertasius, and Mohit Bansal. Videotree: Adaptive tree-based video representation for llm reasoning on long videos. arXiv preprint arXiv:2405.19209, 2024e." + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 72, + 506, + 526 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 505, + 105 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 505, + 105 + ], + "type": "text", + "content": "Yuetian Weng, Mingfei Han, Haoyu He, Xiaojun Chang, and Bohan Zhuang. Longvlm: Efficient long video understanding via large language models. In European Conference on Computer Vision, pages 453-470. Springer, 2024." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 110, + 505, + 152 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 110, + 505, + 152 + ], + "spans": [ + { + "bbox": [ + 106, + 110, + 505, + 152 + ], + "type": "text", + "content": "Chao-Yuan Wu, Yanghao Li, Karttikeya Mangalam, Haoqi Fan, Bo Xiong, Jitendra Malik, and Christoph Feichtenhofer. Memvit: Memory-augmented multiscale vision transformer for efficient long-term video recognition. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 13587-13597, 2022." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 158, + 506, + 171 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 158, + 506, + 171 + ], + "spans": [ + { + "bbox": [ + 107, + 158, + 506, + 171 + ], + "type": "text", + "content": "Penghao Wu and Saining Xie. V*: Guided visual search as a core mechanism in multimodal llms. CVPR, 2023." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 176, + 504, + 198 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 176, + 504, + 198 + ], + "spans": [ + { + "bbox": [ + 107, + 176, + 504, + 198 + ], + "type": "text", + "content": "Jiaqi Xu, Cuiling Lan, Wenxuan Xie, Xuejin Chen, and Yan Lu. Retrieval-based video language model for efficient long video question answering. arXiv preprint arXiv:2312.04931, 2023." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 204, + 504, + 225 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 204, + 504, + 225 + ], + "spans": [ + { + "bbox": [ + 107, + 204, + 504, + 225 + ], + "type": "text", + "content": "Jinhui Ye, Zihan Wang, and Haosen Sun. Longvideohaystack. https://huggingface.co/datasets/LVHaystack/LongVideoHaystack, 2025a. v1.0." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 232, + 504, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 232, + 504, + 262 + ], + "spans": [ + { + "bbox": [ + 107, + 232, + 504, + 262 + ], + "type": "text", + "content": "Jinhui Ye, Zihan Wang, Haosen Sun, Keshigeyan Chandrasegaran, Zane Durante, Cristobal Eyzaguirre, Yonatan Bisk, Juan Carlos Niebles, Ehsan Adeli, Li Fei-Fei, Jiajun Wu, and Manling Li. Re-thinking temporal search for long-form video understanding. In CVPR, 2025b." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 270, + 504, + 291 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 270, + 504, + 291 + ], + "spans": [ + { + "bbox": [ + 107, + 270, + 504, + 291 + ], + "type": "text", + "content": "Shukang Yin, Chaoyou Fu, Sirui Zhao, Ke Li, Xing Sun, Tong Xu, and Enhong Chen. A survey on multimodal large language models. National Science Review, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 298, + 505, + 329 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 298, + 505, + 329 + ], + "spans": [ + { + "bbox": [ + 107, + 298, + 505, + 329 + ], + "type": "text", + "content": "Sicheng Yu, Chengkai Jin, Huan Wang, Zhenghao Chen, Sheng Jin, Zhongrong Zuo, Xiaolei Xu, Zhenbang Sun, Bingni Zhang, Jiawei Wu, Hao Zhang, and Qianru Sun. Frame-voyager: Learning to query frames for video large language models. ArXiv, abs/2410.03226, 2024." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 335, + 504, + 357 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 335, + 504, + 357 + ], + "spans": [ + { + "bbox": [ + 107, + 335, + 504, + 357 + ], + "type": "text", + "content": "Zhou Yu, Dejing Xu, Jun Yu, Ting Yu, Zhou Zhao, Yueting Zhuang, and Dacheng Tao. Activitynet-qa: A dataset for understanding complex web videos via question answering. In AAAI, 2019." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 363, + 505, + 395 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 363, + 505, + 395 + ], + "spans": [ + { + "bbox": [ + 107, + 363, + 505, + 395 + ], + "type": "text", + "content": "Xiangyu Zeng, Kunchang Li, Chenting Wang, Xinhao Li, Tianxiang Jiang, Ziang Yan, Songze Li, Yansong Shi, Zhengrong Yue, Yi Wang, Yali Wang, Yu Qiao, and Limin Wang. Timesuite: Improving mllms for long video understanding via grounded tuning, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 401, + 504, + 422 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 401, + 504, + 422 + ], + "spans": [ + { + "bbox": [ + 107, + 401, + 504, + 422 + ], + "type": "text", + "content": "Hang Zhang, Xin Li, and Lidong Bing. Video-llama: An instruction-tuned audio-visual language model for video understanding. In EMNLP, 2023." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 429, + 504, + 450 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 429, + 504, + 450 + ], + "spans": [ + { + "bbox": [ + 107, + 429, + 504, + 450 + ], + "type": "text", + "content": "Lu Zhang, Tiancheng Zhao, Heting Ying, Yibo Ma, and Kyusong Lee. OmAgent: A multi-modal agent framework for complex video understanding with task divide-and-conquer. In EMNLP, 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 107, + 457, + 505, + 487 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 457, + 505, + 487 + ], + "spans": [ + { + "bbox": [ + 107, + 457, + 505, + 487 + ], + "type": "text", + "content": "Zijia Zhao, Haoyu Lu, Yuqi Huo, Yifan Du, Tongtian Yue, Longteng Guo, Bingning Wang, Weipeng Chen, and Jing Liu. Needle in a video haystack: A scalable synthetic evaluator for video mllms. arXiv preprint arXiv:2406.09367, 2024." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 107, + 495, + 505, + 526 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 495, + 505, + 526 + ], + "spans": [ + { + "bbox": [ + 107, + 495, + 505, + 526 + ], + "type": "text", + "content": "Heqing Zou, Tianze Luo, Guiyang Xie, Fengmao Lv, Guangcong Wang, Junyang Chen, Zhuochen Wang, Hansheng Zhang, Huajian Zhang, et al. From seconds to hours: Reviewing multimodal large language models on comprehensive long video understanding. arXiv preprint arXiv:2409.18938, 2024." 
+ } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 69, + 145, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 69, + 145, + 83 + ], + "spans": [ + { + "bbox": [ + 105, + 69, + 145, + 83 + ], + "type": "text", + "content": "Part I" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 93, + 193, + 116 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 93, + 193, + 116 + ], + "spans": [ + { + "bbox": [ + 105, + 93, + 193, + 116 + ], + "type": "text", + "content": "Appendix" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 135, + 216, + 150 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 135, + 216, + 150 + ], + "spans": [ + { + "bbox": [ + 105, + 135, + 216, + 150 + ], + "type": "text", + "content": "Table of Contents" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 156, + 481, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 156, + 481, + 167 + ], + "spans": [ + { + "bbox": [ + 130, + 156, + 481, + 167 + ], + "type": "text", + "content": "A Theoretical Underpinnings of Relation Categories 14" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 143, + 169, + 481, + 203 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 143, + 169, + 481, + 179 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 169, + 481, + 179 + ], + "spans": [ + { + "bbox": [ + 143, + 169, + 481, + 179 + ], + "type": "text", + "content": "A.1 Linguistic Grounding 14" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 143, + 181, + 481, + 191 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 181, + 481, + 191 + ], + "spans": [ + { + "bbox": [ + 143, + 181, + 481, + 191 + ], + "type": "text", + "content": "A.2 Logical Grounding 14" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 143, + 193, + 481, + 203 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 193, + 481, + 203 + ], + "spans": [ + { + "bbox": [ + 143, + 193, + 481, + 203 + ], + "type": "text", + "content": "A.3 Pragmatic Completeness for VQA 14" + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 130, + 213, + 481, + 245 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 130, + 213, + 481, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 213, + 481, + 224 + ], + "spans": [ + { + "bbox": [ + 130, + 213, + 481, + 224 + ], + "type": "text", + "content": "B Performance 15" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 234, + 481, + 245 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 234, + 481, + 245 + ], + "spans": [ + { + "bbox": [ + 130, + 234, + 481, + 245 + ], + "type": "text", + "content": "C Analysis of the Impact of Search Frame Count 15" + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 130, + 255, + 481, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 255, + 481, + 266 + ], + 
"spans": [ + { + "bbox": [ + 130, + 255, + 481, + 266 + ], + "type": "text", + "content": "D Details of Datasets 16" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 143, + 267, + 481, + 313 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 143, + 267, + 481, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 267, + 481, + 277 + ], + "spans": [ + { + "bbox": [ + 143, + 267, + 481, + 277 + ], + "type": "text", + "content": "D.1 Details ofVIDEO-MME 16" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 143, + 279, + 481, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 279, + 481, + 289 + ], + "spans": [ + { + "bbox": [ + 143, + 279, + 481, + 289 + ], + "type": "text", + "content": "D.2 Details of LONGVIDEOBENCH 16" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 143, + 291, + 481, + 301 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 291, + 481, + 301 + ], + "spans": [ + { + "bbox": [ + 143, + 291, + 481, + 301 + ], + "type": "text", + "content": "D.3 Details of LV-HAYSTACK 16" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 143, + 303, + 481, + 313 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 303, + 481, + 313 + ], + "spans": [ + { + "bbox": [ + 143, + 303, + 481, + 313 + ], + "type": "text", + "content": "D.4 Details of EGO-4D 17" + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 130, + 324, + 481, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 324, + 481, + 335 + ], + "spans": [ + { + "bbox": [ + 130, + 324, + 481, + 335 + ], + "type": "text", + "content": "E Detailed Algorithm 17" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 143, + 336, + 481, + 407 + ], + "type": "list", + "angle": 0, + "index": 24, + "blocks": [ + { + "bbox": [ + 143, + 336, + 481, + 346 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 336, + 481, + 346 + ], + "spans": [ + { + "bbox": [ + 143, + 336, + 481, + 346 + ], + "type": "text", + "content": "E.1 Algorithm Overview and Core Components 17" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 143, + 348, + 481, + 358 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 348, + 481, + 358 + ], + "spans": [ + { + "bbox": [ + 143, + 348, + 481, + 358 + ], + "type": "text", + "content": "E.2 Implementation Considerations 19" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 143, + 361, + 481, + 371 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 361, + 481, + 371 + ], + "spans": [ + { + "bbox": [ + 143, + 361, + 481, + 371 + ], + "type": "text", + "content": "E.3 Computational Complexity Analysis 19" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 143, + 373, + 481, + 382 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 373, + 481, + 382 + ], + "spans": [ + { + "bbox": [ + 143, + 373, + 481, + 382 + ], + "type": "text", + "content": "E.4 Technical Implementation Details 19" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 143, + 384, + 481, + 395 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 384, + 481, + 395 + ], + "spans": [ + { + "bbox": [ + 143, + 384, + 481, + 395 + ], + "type": "text", + "content": "E.5 Practical Application Examples 21" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 143, + 396, + 481, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + 
"bbox": [ + 143, + 396, + 481, + 407 + ], + "spans": [ + { + "bbox": [ + 143, + 396, + 481, + 407 + ], + "type": "text", + "content": "E.6 System Specifications for Reproductivity 21" + } + ] + } + ], + "index": 23 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 130, + 416, + 481, + 449 + ], + "type": "list", + "angle": 0, + "index": 27, + "blocks": [ + { + "bbox": [ + 130, + 416, + 481, + 427 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 416, + 481, + 427 + ], + "spans": [ + { + "bbox": [ + 130, + 416, + 481, + 427 + ], + "type": "text", + "content": "F Case Study of VSLS Keyframe Selection 21" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 130, + 437, + 481, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 437, + 481, + 449 + ], + "spans": [ + { + "bbox": [ + 130, + 437, + 481, + 449 + ], + "type": "text", + "content": "G Iteration Analysis 22" + } + ] + } + ], + "index": 26 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 130, + 459, + 481, + 469 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 459, + 481, + 469 + ], + "spans": [ + { + "bbox": [ + 130, + 459, + 481, + 469 + ], + "type": "text", + "content": "H Prompt 23" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 143, + 471, + 481, + 494 + ], + "type": "list", + "angle": 0, + "index": 31, + "blocks": [ + { + "bbox": [ + 143, + 471, + 481, + 481 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 471, + 481, + 481 + ], + "spans": [ + { + "bbox": [ + 143, + 471, + 481, + 481 + ], + "type": "text", + "content": "H.1 Prompt Template for Query Grounding 23" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 143, + 483, + 481, + 494 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 483, + 481, + 494 + ], + "spans": [ + { + "bbox": [ + 143, + 483, + 481, + 494 + ], + "type": "text", + "content": "H.2 Prompt Template for Question Answering 23" + } + ] + } + ], + "index": 30 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 130, + 503, + 481, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 503, + 481, + 514 + ], + "spans": [ + { + "bbox": [ + 130, + 503, + 481, + 514 + ], + "type": "text", + "content": "I Limitations 24" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 130, + 525, + 481, + 535 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 525, + 481, + 535 + ], + "spans": [ + { + "bbox": [ + 130, + 525, + 481, + 535 + ], + "type": "text", + "content": "J Broader Impacts 24" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 143, + 536, + 481, + 559 + ], + "type": "list", + "angle": 0, + "index": 36, + "blocks": [ + { + "bbox": [ + 143, + 536, + 481, + 547 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 536, + 481, + 547 + ], + "spans": [ + { + "bbox": [ + 143, + 536, + 481, + 547 + ], + "type": "text", + "content": "J.1 Positive Impacts 24" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 143, + 548, + 481, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 548, + 481, + 559 + ], + "spans": [ + { + "bbox": [ + 143, + 548, + 481, + 559 + ], + "type": "text", + "content": "J.2 Potential Considerations 24" + } + ] + } + ], + "index": 35 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "spans": 
[ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 37 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 71, + 384, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 71, + 384, + 85 + ], + "spans": [ + { + "bbox": [ + 105, + 71, + 384, + 85 + ], + "type": "text", + "content": "A Theoretical Underpinnings of Relation Categories" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 96, + 504, + 141 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 96, + 504, + 141 + ], + "spans": [ + { + "bbox": [ + 104, + 96, + 504, + 141 + ], + "type": "text", + "content": "Our choice of the four relation categories—spatial, temporal, attribute, and causal—is grounded in foundational concepts from linguistics and logic. While achieving absolute “completeness” in describing the infinite complexity of the real world is a formidable challenge, this selection aims to describe core aspects of events, states, and the way humans conceptualize and communicate them." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 154, + 226, + 166 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 154, + 226, + 166 + ], + "spans": [ + { + "bbox": [ + 105, + 154, + 226, + 166 + ], + "type": "text", + "content": "A.1 Linguistic Grounding" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 171, + 504, + 193 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 171, + 504, + 193 + ], + "spans": [ + { + "bbox": [ + 104, + 171, + 504, + 193 + ], + "type": "text", + "content": "Semantic Roles and Case Grammar: Theories like Fillmore's Case Grammar Fillmore (1967) analyze sentences in terms of semantic roles that nominals play in relation to the verb (the event)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 194, + 505, + 295 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 105, + 194, + 505, + 216 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 194, + 505, + 216 + ], + "spans": [ + { + "bbox": [ + 105, + 194, + 505, + 216 + ], + "type": "text", + "content": "- Spatial relations directly correspond to roles like Locative (the location of an event or state) or Path (the trajectory of motion)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 217, + 498, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 217, + 498, + 228 + ], + "spans": [ + { + "bbox": [ + 105, + 217, + 498, + 228 + ], + "type": "text", + "content": "- Temporal relations align with Temporal roles, specifying when an event occurs or its duration." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 229, + 505, + 261 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 229, + 505, + 261 + ], + "spans": [ + { + "bbox": [ + 105, + 229, + 505, + 261 + ], + "type": "text", + "content": "- Attributes describe the properties of entities (participants) involved in these roles. While not direct case roles for verbs, they are fundamental for identifying and characterizing the \"who\" and \"what\" (e.g., Agent, Patient, Theme, Instrument) that possess these attributes during an event." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 262, + 504, + 295 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 262, + 504, + 295 + ], + "spans": [ + { + "bbox": [ + 105, + 262, + 504, + 295 + ], + "type": "text", + "content": "- Causal relations are central to understanding agency and event structure. Roles like Agent (the instigator of an action) or Cause (the non-volitional trigger of an event) highlight the importance of causality in linguistic descriptions of events." + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 300, + 506, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 300, + 506, + 376 + ], + "spans": [ + { + "bbox": [ + 104, + 300, + 506, + 376 + ], + "type": "text", + "content": "Lexical Semantics and Event Structure: Works in lexical semantics (e.g., following Pustejovsky Cohen (1968) on the generative lexicon, or Talmy Talmy (2000) on cognitive semantics) often decompose event meaning into fundamental components. Talmy Talmy (2000), for instance, extensively discusses how language structures concepts like space, time, and force dynamics (which inherently relate to causality). Events are situated in space and time, involve entities with specific attributes, and are often linked through causal chains (e.g., one action causing another, or an agent causing a change of state)." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 380, + 505, + 459 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 380, + 505, + 459 + ], + "spans": [ + { + "bbox": [ + 104, + 380, + 505, + 459 + ], + "type": "text", + "content": "Discourse Relations: Theories like Rhetorical Structure Theory (RST) Mann and Thompson (1988) identify relations that bind textual units together. Many of these fundamental relations are inherently temporal (e.g., Sequence), causal (e.g., Cause, Result, Purpose), or involve describing entities and their settings (which encompasses spatial and attributive information, often under relations like Elaboration or Background). This suggests that these four categories capture essential elements for constructing coherent descriptions and explanations, a core function of Video Question Answering (VQA)." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 472, + 214, + 483 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 472, + 214, + 483 + ], + "spans": [ + { + "bbox": [ + 105, + 472, + 214, + 483 + ], + "type": "text", + "content": "A.2 Logical Grounding" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 488, + 506, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 488, + 506, + 555 + ], + "spans": [ + { + "bbox": [ + 104, + 488, + 506, + 555 + ], + "type": "text", + "content": "Predicate Logic and Knowledge Representation: In formal logic and AI knowledge representation (e.g., Sowa Sowa (2000)), events and states are often represented using predicates with arguments that specify participants, locations, times, and properties. A typical event representation might implicitly or explicitly include Location(event, place), Time(event, time_interval), HasProperty(event, attribute_value), and relations like Causes(event1, event2). Our four categories provide a high-level abstraction over these common predicate types." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 559, + 506, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 559, + 506, + 581 + ], + "spans": [ + { + "bbox": [ + 105, + 559, + 506, + 581 + ], + "type": "text", + "content": "Modal and Specialized Logics: Temporal Logic is specifically designed to reason about propositions qualified in terms of time." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 582, + 504, + 626 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 105, + 582, + 477, + 593 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 582, + 477, + 593 + ], + "spans": [ + { + "bbox": [ + 105, + 582, + 477, + 593 + ], + "type": "text", + "content": "- Spatial Logic deals with reasoning about spatial properties and relations between entities." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 594, + 504, + 626 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 594, + 504, + 626 + ], + "spans": [ + { + "bbox": [ + 105, + 594, + 504, + 626 + ], + "type": "text", + "content": "- Logics of Action and Causality (e.g., situation calculus, event calculus, or Pearl's work on causality Neuberg (2003)) explicitly model how actions bring about changes and the causal dependencies between events." + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 105, + 640, + 278, + 652 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 640, + 278, + 652 + ], + "spans": [ + { + "bbox": [ + 105, + 640, + 278, + 652 + ], + "type": "text", + "content": "A.3 Pragmatic Completeness for VQA" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 661, + 506, + 683 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 661, + 506, + 683 + ], + "spans": [ + { + "bbox": [ + 104, + 661, + 506, + 683 + ], + "type": "text", + "content": "From a pragmatic standpoint, particularly for VQA, these four relations address the core \"Wh-questions\" humans often ask to understand a scene or event:" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 686, + 436, + 722 + ], + "type": "list", + "angle": 0, + "index": 22, + "blocks": [ + { + "bbox": [ + 105, + 686, + 436, + 698 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 686, + 436, + 698 + ], + "spans": [ + { + "bbox": [ + 105, + 686, + 436, + 698 + ], + "type": "text", + "content": "- What/Who? (Identifies objects/entities, often distinguished by their attributes)" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 106, + 699, + 282, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 699, + 282, + 710 + ], + "spans": [ + { + "bbox": [ + 106, + 699, + 282, + 710 + ], + "type": "text", + "content": "- Where? (Answered by spatial relations)" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 106, + 711, + 288, + 722 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 711, + 288, + 722 + ], + "spans": [ + { + "bbox": [ + 106, + 711, + 288, + 722 + ], + "type": "text", + "content": "- When? 
(Answered by temporal relations)" + } + ] + } + ], + "index": 21 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 72, + 504, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 72, + 504, + 95 + ], + "spans": [ + { + "bbox": [ + 105, + 72, + 504, + 95 + ], + "type": "text", + "content": "- Why/How did it happen? (Often answered by causal relations or a sequence of events linked temporally and spatially)" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 99, + 504, + 155 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 99, + 504, + 155 + ], + "spans": [ + { + "bbox": [ + 104, + 99, + 504, + 155 + ], + "type": "text", + "content": "While more fine-grained relations (as in Action Genome) undoubtedly provide deeper semantic detail, our chosen set aims to provide a foundational, yet computationally manageable, framework for keyframe selection based on the most common semantic and logical inferences required for a broad range of video queries. They represent a level of abstraction that is both meaningful for human queries and feasible for current visual-language models to parse and verify." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 158, + 504, + 192 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 158, + 504, + 192 + ], + "spans": [ + { + "bbox": [ + 104, + 158, + 504, + 192 + ], + "type": "text", + "content": "In essence, these categories are not arbitrary but reflect fundamental dimensions along which events and states are structured, perceived, and communicated in language and reasoned about in logic. We believe they offer a robust and broadly applicable framework for the task at hand." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 208, + 194, + 220 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 208, + 194, + 220 + ], + "spans": [ + { + "bbox": [ + 105, + 208, + 194, + 220 + ], + "type": "text", + "content": "B Performance" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 232, + 506, + 310 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 232, + 506, + 310 + ], + "spans": [ + { + "bbox": [ + 104, + 232, + 506, + 310 + ], + "type": "text", + "content": "Long-form video understanding presents unique challenges due to the complexity of temporal dynamics and cross-modal interactions in extended durations (900-3,600 seconds). Our comprehensive evaluation of the LVB-XL benchmark reveals significant performance gaps between existing approaches. While large-scale models like GPT-4O (32 frames) and INTERNVL 2.5-78B (16 frames) have demonstrated competence in short-video tasks, their direct application to long-form content (marked by circle sizes proportional to model parameters) yields suboptimal results (53.8% and 56.5% accuracy respectively)." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 314, + 506, + 381 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 314, + 506, + 381 + ], + "spans": [ + { + "bbox": [ + 104, + 314, + 506, + 381 + ], + "type": "text", + "content": "Our Visual Semantic-Logical Search (VSLS) framework addresses these limitations. This advancement enables consistent performance improvements across different architecture scales, elevating GPT-40 to " + }, + { + "bbox": [ + 104, + 314, + 506, + 381 + ], + "type": "inline_equation", + "content": "54.2\\%" + }, + { + "bbox": [ + 104, + 314, + 506, + 381 + ], + "type": "inline_equation", + "content": "(+0.4\\mathrm{pp})" + }, + { + "bbox": [ + 104, + 314, + 506, + 381 + ], + "type": "text", + "content": " and achieving a remarkable " + }, + { + "bbox": [ + 104, + 314, + 506, + 381 + ], + "type": "inline_equation", + "content": "62.4\\%" + }, + { + "bbox": [ + 104, + 314, + 506, + 381 + ], + "type": "inline_equation", + "content": "(+5.9\\mathrm{pp})" + }, + { + "bbox": [ + 104, + 314, + 506, + 381 + ], + "type": "text", + "content": " for INTERNLV 2.5-78B on this benchmark. The comparative analysis further suggests that VSLS's gains become particularly pronounced when processing longer visual sequences, highlighting its effectiveness in modeling extended temporal contexts." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 396, + 367, + 410 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 396, + 367, + 410 + ], + "spans": [ + { + "bbox": [ + 105, + 396, + 367, + 410 + ], + "type": "text", + "content": "C Analysis of the Impact of Search Frame Count" + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 157, + 428, + 455, + 619 + ], + "blocks": [ + { + "bbox": [ + 157, + 428, + 455, + 619 + ], + "lines": [ + { + "bbox": [ + 157, + 428, + 455, + 619 + ], + "spans": [ + { + "bbox": [ + 157, + 428, + 455, + 619 + ], + "type": "image", + "image_path": "1c05698f5ebb877c71c8dc80fe7f27060313591cbce3eed88b06bbac45cee230.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 626, + 504, + 649 + ], + "lines": [ + { + "bbox": [ + 104, + 626, + 504, + 649 + ], + "spans": [ + { + "bbox": [ + 104, + 626, + 504, + 649 + ], + "type": "text", + "content": "Figure 5: Performance improvement with increasing search frames. VSLS consistently enhances accuracy and reaches near-human oracle performance at 64 frames." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 662, + 504, + 684 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 662, + 504, + 684 + ], + "spans": [ + { + "bbox": [ + 104, + 662, + 504, + 684 + ], + "type": "text", + "content": "This section investigates the impact of the number of search frames on the performance of our Visual Language Models (VLMs) in the context of LONGVIDEOBENCH." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "type": "text", + "content": "Figure 5 in the T* framework study empirically demonstrates the non-monotonic relationship between input frame quantity and model accuracy on the LONGVIDEOBENCH XL benchmark. 
Through systematic experimentation across 18 state-of-the-art VLMs, this visualization reveals a critical" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 72, + 504, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 72, + 504, + 95 + ], + "spans": [ + { + "bbox": [ + 105, + 72, + 504, + 95 + ], + "type": "text", + "content": "phenomenon: excessive frame inputs degrade performance for models lacking temporal redundancy mitigation mechanisms." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 113, + 224, + 126 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 113, + 224, + 126 + ], + "spans": [ + { + "bbox": [ + 105, + 113, + 224, + 126 + ], + "type": "text", + "content": "D Details of Datasets" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 139, + 236, + 150 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 139, + 236, + 150 + ], + "spans": [ + { + "bbox": [ + 105, + 139, + 236, + 150 + ], + "type": "text", + "content": "D.1 Details of Video-MME" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 160, + 506, + 260 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 160, + 506, + 260 + ], + "spans": [ + { + "bbox": [ + 104, + 160, + 506, + 260 + ], + "type": "text", + "content": "The Video-MME (Video Multi-Modal Evaluation) dataset represents the first comprehensive benchmark tailored to assess the capabilities of Vision-Language Models (VLMs) in video understanding. Aiming to address limitations in existing benchmarks, it emphasizes diversity, temporal complexity, and multi-modal integration while ensuring high-quality human annotations. The dataset contains 900 carefully curated videos across six primary domains—Knowledge, Film and Television, Sports Competition, Artistic Performance, Life Record, and Multilingual—with 30 fine-grained subcategories such as astronomy, esports, and documentaries. These videos vary significantly in duration, ranging from short clips (11 seconds) to long-form content (up to 1 hour), enabling robust evaluation across temporal scales." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 264, + 506, + 342 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 264, + 506, + 342 + ], + "spans": [ + { + "bbox": [ + 104, + 264, + 506, + 342 + ], + "type": "text", + "content": "Each video is paired with expert-annotated multiple-choice questions (2,700 QA pairs in total), rigorously validated to ensure clarity and reliance on visual or multi-modal context. Questions span 12 task types, including action recognition, temporal reasoning, and domain-specific knowledge, with a focus on scenarios where answers cannot be inferred from text alone. To quantify temporal complexity, the dataset introduces certificate length analysis, revealing that answering questions often requires understanding extended video segments (e.g., median lengths of 26 seconds for short videos and 890.7 seconds for long videos), surpassing the demands of prior benchmarks like EGOSchema." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 346, + 506, + 401 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 346, + 506, + 401 + ], + "spans": [ + { + "bbox": [ + 104, + 346, + 506, + 401 + ], + "type": "text", + "content": "VIDEO-MME serves as a universal benchmark, applicable to both image- and video-focused MLLMs, and exposes key challenges for future research. These include improving architectures for long-sequence processing, developing datasets for complex temporal reasoning, and enhancing cross-modal alignment. By providing a rigorous evaluation framework,VIDEO-MME aims to drive progress toward MLLMs capable of understanding dynamic, real-world scenarios." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 417, + 264, + 428 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 417, + 264, + 428 + ], + "spans": [ + { + "bbox": [ + 105, + 417, + 264, + 428 + ], + "type": "text", + "content": "D.2 Details of LONGVIDEOBENCH" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 438, + 506, + 537 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 438, + 506, + 537 + ], + "spans": [ + { + "bbox": [ + 104, + 438, + 506, + 537 + ], + "type": "text", + "content": "The LONGVIDEOBENCH benchmark pioneers the evaluation of long-context interleaved video-language understanding in VLMs, addressing critical gaps in existing benchmarks through its focus on detailed retrieval and temporal reasoning over hour-long multimodal inputs. Designed to overcome the \"single-frame bias\" prevalent in prior video benchmarks, the novel referring reasoning paradigm enables models to locate and analyze specific contexts within extended sequences. The data set comprises 3,763 web-sourced videos that span various themes - movies, news, life vlogs, and knowledge domains (including art, history, and STEM) - with durations progressively grouped into four levels: 8-15 seconds, 15-60 seconds, 3-10 minutes, and 15-60 minutes. Each video is paired with aligned subtitles, forming interleaved multimodal inputs that mimic real-world viewing scenarios." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 542, + 506, + 631 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 542, + 506, + 631 + ], + "spans": [ + { + "bbox": [ + 104, + 542, + 506, + 631 + ], + "type": "text", + "content": "The benchmark features 6,678 human-annotated multiple-choice questions categorized into 17 fine-grained task types across two levels: Perception (requiring object/attribute recognition in single scenes) and Relation (demanding temporal/causal reasoning across multiple scenes). Questions incorporate explicit referring queries (e.g., \"When the woman descends the rocky hill...\") that anchor reasoning to specific video moments, with an average question length of 43.5 words to ensure precision. Temporal complexity is quantified through duration-grouped analysis, where models must process up to 256 frames (at 1 fps) for hour-long videos, significantly exceeding the demands of predecessors like EGOSchema (180s videos)." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 646, + 241, + 657 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 646, + 241, + 657 + ], + "spans": [ + { + "bbox": [ + 105, + 646, + 241, + 657 + ], + "type": "text", + "content": "D.3 Details of LV-HAYSTACK" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 667, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 667, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 667, + 506, + 723 + ], + "type": "text", + "content": "The LV-HAYSTACK benchmark establishes the first comprehensive evaluation framework for temporal search in long-form video understanding, addressing critical limitations in existing synthetic needle-in-haystack benchmarks through real-world video annotations and multi-dimensional evaluation metrics. Designed to assess models' ability to locate minimal keyframe sets (typically 1-5 frames) from hour-long videos containing tens of thousands of frames, the dataset comprises 3,874 human" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 312, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 312, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 312, + 750 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 95 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 95 + ], + "type": "text", + "content": "annotated instances spanning 150 hours of video content across two distinct categories: egocentric videos from EGO4D (101 hours) and allocentric videos from LONGVIDEOBENCH (57.7 hours)." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 100, + 504, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 100, + 504, + 133 + ], + "spans": [ + { + "bbox": [ + 104, + 100, + 504, + 133 + ], + "type": "text", + "content": "Organized into HAYSTACK-EGO4D and HAYSTACK-LVBENCH subsets, the benchmark features videos averaging 24.8 minutes in length (max 60 minutes) with 44,717 frames per video. Each instance contains:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 137, + 504, + 183 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 105, + 137, + 482, + 148 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 137, + 482, + 148 + ], + "spans": [ + { + "bbox": [ + 105, + 137, + 482, + 148 + ], + "type": "text", + "content": "- Expert-curated multi-choice questions requiring temporal reasoning (15.9 questions/video);" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 148, + 504, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 148, + 504, + 171 + ], + "spans": [ + { + "bbox": [ + 105, + 148, + 504, + 171 + ], + "type": "text", + "content": "- Human-annotated keyframe sets (4.7 frames/question for egocentric, 1.8 frames/question for allocentric);" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 171, + 389, + 183 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 171, + 389, + 183 + ], + "spans": [ + { + "bbox": [ + 105, + 171, + 389, + 183 + ], + "type": "text", + "content": "- Temporal and visual similarity metrics for precise search evaluation." 
+ } + ] + } + ], + "index": 4 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 105, + 199, + 212, + 210 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 199, + 212, + 210 + ], + "spans": [ + { + "bbox": [ + 105, + 199, + 212, + 210 + ], + "type": "text", + "content": "D.4 Details of EGO-4D" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 220, + 506, + 309 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 220, + 506, + 309 + ], + "spans": [ + { + "bbox": [ + 104, + 220, + 506, + 309 + ], + "type": "text", + "content": "The EGO4D (Egocentric Computer Vision Benchmark) dataset establishes a transformative foundation for advancing research in first-person visual perception through unprecedented scale, diversity, and multi-modal integration. Designed to overcome limitations in existing egocentric datasets, it captures 3,670 hours of unscripted daily activities from 931 participants across 74 global locations and 9 countries, spanning household, workplace, leisure, and outdoor scenarios. The dataset features " + }, + { + "bbox": [ + 104, + 220, + 506, + 309 + ], + "type": "inline_equation", + "content": "30+" + }, + { + "bbox": [ + 104, + 220, + 506, + 309 + ], + "type": "text", + "content": " fine-grained activity categories including carpentry, social gaming, and meal preparation, with videos ranging from brief interactions (8-minute clips) to extended continuous recordings (up to 10 hours), enabling comprehensive analysis of long-term behavioral patterns." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 314, + 506, + 369 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 314, + 506, + 369 + ], + "spans": [ + { + "bbox": [ + 104, + 314, + 506, + 369 + ], + "type": "text", + "content": "Each video is enriched with multi-modal annotations totaling 3.85 million dense textual narrations (13.2 sentences/minute), coupled with 3D environment meshes, eye gaze tracking, stereo vision, and synchronized multi-camera views. Rigorous privacy protocols ensure ethical data collection, with 612 hours containing unblurred faces/audio for social interaction studies. 
The benchmark suite introduces five core tasks organized across temporal dimensions:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 373, + 505, + 453 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 105, + 373, + 504, + 394 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 373, + 504, + 394 + ], + "spans": [ + { + "bbox": [ + 105, + 373, + 504, + 394 + ], + "type": "text", + "content": "- Episodic Memory: Temporal localization of natural language queries (74K instances) and 3D object tracking using Matterport scans;" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 396, + 505, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 396, + 505, + 418 + ], + "spans": [ + { + "bbox": [ + 105, + 396, + 505, + 418 + ], + "type": "text", + "content": "- Hand-Object Interaction: State change detection (1.3M annotations) with PNR (point-of-no-return) temporal localization;" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 418, + 504, + 440 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 418, + 504, + 440 + ], + "spans": [ + { + "bbox": [ + 105, + 418, + 504, + 440 + ], + "type": "text", + "content": "- Social Understanding: Audio-visual diarisation (2,535h audio) and gaze-directed communication analysis;" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 441, + 453, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 441, + 453, + 453 + ], + "spans": [ + { + "bbox": [ + 105, + 441, + 453, + 453 + ], + "type": "text", + "content": "- Action Forecasting: Anticipation of locomotion trajectories and object interactions." + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 456, + 506, + 523 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 456, + 506, + 523 + ], + "spans": [ + { + "bbox": [ + 104, + 456, + 506, + 523 + ], + "type": "text", + "content": "Quantitative analysis reveals the dataset's complexity: hand-object interactions involve 1,772 unique verbs and 4,336 nouns, while social scenarios contain 6.8 participant interactions per minute on average. Multi-modal fusion experiments demonstrate performance gains, with 3D environment context improving object localization accuracy by " + }, + { + "bbox": [ + 104, + 456, + 506, + 523 + ], + "type": "inline_equation", + "content": "18.7\\%" + }, + { + "bbox": [ + 104, + 456, + 506, + 523 + ], + "type": "text", + "content": " compared to RGB-only baselines. State-of-the-art models achieve " + }, + { + "bbox": [ + 104, + 456, + 506, + 523 + ], + "type": "inline_equation", + "content": "68.9\\%" + }, + { + "bbox": [ + 104, + 456, + 506, + 523 + ], + "type": "text", + "content": " accuracy in action anticipation tasks, yet struggle with long-term forecasting (41.2% accuracy for 5s predictions), highlighting critical challenges in temporal reasoning." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 528, + 506, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 528, + 506, + 605 + ], + "spans": [ + { + "bbox": [ + 104, + 528, + 506, + 605 + ], + "type": "text", + "content": "EGO4D's unique integration of egocentric video with complementary modalities (IMU data in 836h, gaze tracking in 45h) enables novel research directions in embodied AI and augmented reality. 
The dataset exposes fundamental limitations in current architectures, particularly in processing hour-long video contexts and synthesizing cross-modal signals—only " + }, + { + "bbox": [ + 104, + 528, + 506, + 605 + ], + "type": "inline_equation", + "content": "23\\%" + }, + { + "bbox": [ + 104, + 528, + 506, + 605 + ], + "type": "text", + "content": " of tested models effectively utilized audio-visual synchronization cues. By providing standardized evaluation protocols and curated challenge subsets, EGO4D serves as a universal testbed for developing perceptive systems capable of understanding persistent 3D environments and complex human behaviors." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 624, + 227, + 636 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 624, + 227, + 636 + ], + "spans": [ + { + "bbox": [ + 105, + 624, + 227, + 636 + ], + "type": "text", + "content": "E Detailed Algorithm" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 651, + 343, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 651, + 343, + 662 + ], + "spans": [ + { + "bbox": [ + 105, + 651, + 343, + 662 + ], + "type": "text", + "content": "The detailed VSLS algorithm is represented in Algorithm 2." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 678, + 318, + 690 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 678, + 318, + 690 + ], + "spans": [ + { + "bbox": [ + 105, + 678, + 318, + 690 + ], + "type": "text", + "content": "E.1 Algorithm Overview and Core Components" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "type": "text", + "content": "The algorithm operates as an adaptive search framework that intelligently explores video content (represented as set " + }, + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "type": "inline_equation", + "content": "V" + }, + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "type": "text", + "content": ") to locate frames matching semantic-logical query requirements " + }, + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "type": "inline_equation", + "content": "(Q)" + }, + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "type": "text", + "content": ". 
Unlike" + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "type": "code", + "bbox": [ + 106, + 88, + 504, + 355 + ], + "blocks": [ + { + "bbox": [ + 106, + 75, + 356, + 87 + ], + "lines": [ + { + "bbox": [ + 106, + 75, + 356, + 87 + ], + "spans": [ + { + "bbox": [ + 106, + 75, + 356, + 87 + ], + "type": "text", + "content": "Algorithm 2: The completed Visual Semantic-Logical Search" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 106, + 88, + 504, + 355 + ], + "lines": [ + { + "bbox": [ + 106, + 88, + 504, + 355 + ], + "spans": [ + { + "bbox": [ + 106, + 88, + 504, + 355 + ], + "type": "text", + "content": "Function SemanticLogicalTemporalSearch(V,Q,K, " + }, + { + "bbox": [ + 106, + 88, + 504, + 355 + ], + "type": "inline_equation", + "content": "\\Delta_t,\\tau ,\\alpha ,\\gamma" + }, + { + "bbox": [ + 106, + 88, + 504, + 355 + ], + "type": "text", + "content": " .. \n" + }, + { + "bbox": [ + 106, + 88, + 504, + 355 + ], + "type": "inline_equation", + "content": "\\mathcal{O},\\mathcal{R}\\gets" + }, + { + "bbox": [ + 106, + 88, + 504, + 355 + ], + "type": "text", + "content": " ParseQuestion(Q); // Extract key/cue objects and relationships \n" + }, + { + "bbox": [ + 106, + 88, + 504, + 355 + ], + "type": "inline_equation", + "content": "P\\leftarrow" + }, + { + "bbox": [ + 106, + 88, + 504, + 355 + ], + "type": "text", + "content": " Uniform, " + }, + { + "bbox": [ + 106, + 88, + 504, + 355 + ], + "type": "inline_equation", + "content": "B\\leftarrow |V|,S\\leftarrow \\emptyset ,N_{v}\\leftarrow |V|" + }, + { + "bbox": [ + 106, + 88, + 504, + 355 + ], + "type": "text", + "content": " // Initialize distribution and state \nwhile " + }, + { + "bbox": [ + 106, + 88, + 504, + 355 + ], + "type": "inline_equation", + "content": "B > 0" + }, + { + "bbox": [ + 106, + 88, + 504, + 355 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 106, + 88, + 504, + 355 + ], + "type": "inline_equation", + "content": "|\\mathcal{O}| > 0" + }, + { + "bbox": [ + 106, + 88, + 504, + 355 + ], + "type": "text", + "content": " do \n" + }, + { + "bbox": [ + 106, + 88, + 504, + 355 + ], + "type": "inline_equation", + "content": "k\\gets \\lfloor \\sqrt{B}\\rfloor ,G\\gets" + }, + { + "bbox": [ + 106, + 88, + 504, + 355 + ], + "type": "text", + "content": " Grid(Sample " + }, + { + "bbox": [ + 106, + 88, + 504, + 355 + ], + "type": "inline_equation", + "content": "(P,k^2)" + }, + { + "bbox": [ + 106, + 88, + 504, + 355 + ], + "type": "text", + "content": " ); // Adaptive grid sampling \n" + }, + { + "bbox": [ + 106, + 88, + 504, + 355 + ], + "type": "inline_equation", + "content": "\\Omega \\gets" + }, + { + "bbox": [ + 106, + 88, + 504, + 355 + ], + "type": "text", + "content": " DetectObjects(G); // Detect objects in sampled frames \nforeach " + }, + { + "bbox": [ + 106, + 88, + 504, + 355 + ], + "type": "inline_equation", + "content": "g\\in G" + }, + { + "bbox": [ + 106, + 88, + 504, + 355 + ], + "type": "text", + "content": " do \n" + }, + { + "bbox": [ + 106, + 88, + 504, + 355 + ], + "type": "inline_equation", + "content": "C_g\\gets" + }, + { + "bbox": [ + 106, + 88, + 
504, + 355 + ], + "type": "text", + "content": " CalculateBaseScore( " + }, + { + "bbox": [ + 106, + 88, + 504, + 355 + ], + "type": "inline_equation", + "content": "\\Omega [g])" + }, + { + "bbox": [ + 106, + 88, + 504, + 355 + ], + "type": "text", + "content": " ; // Base detection confidence \nforeach " + }, + { + "bbox": [ + 106, + 88, + 504, + 355 + ], + "type": "inline_equation", + "content": "r\\in \\mathcal{R}" + }, + { + "bbox": [ + 106, + 88, + 504, + 355 + ], + "type": "text", + "content": " do if r.type " + }, + { + "bbox": [ + 106, + 88, + 504, + 355 + ], + "type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 106, + 88, + 504, + 355 + ], + "type": "text", + "content": " Spatial then " + }, + { + "bbox": [ + 106, + 88, + 504, + 355 + ], + "type": "inline_equation", + "content": "C_g\\gets C_g + \\alpha \\gamma_{\\mathrm{spatial}}\\cdot" + }, + { + "bbox": [ + 106, + 88, + 504, + 355 + ], + "type": "text", + "content": " CheckSpatialRelationship(r, " + }, + { + "bbox": [ + 106, + 88, + 504, + 355 + ], + "type": "inline_equation", + "content": "\\Omega [g])" + }, + { + "bbox": [ + 106, + 88, + 504, + 355 + ], + "type": "text", + "content": " else if r.type " + }, + { + "bbox": [ + 106, + 88, + 504, + 355 + ], + "type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 106, + 88, + 504, + 355 + ], + "type": "text", + "content": " Temporal then " + }, + { + "bbox": [ + 106, + 88, + 504, + 355 + ], + "type": "inline_equation", + "content": "C_g\\gets C_g + \\alpha \\gamma_{\\mathrm{time}}\\cdot" + }, + { + "bbox": [ + 106, + 88, + 504, + 355 + ], + "type": "text", + "content": " CheckTemporalRelationship(r, " + }, + { + "bbox": [ + 106, + 88, + 504, + 355 + ], + "type": "inline_equation", + "content": "\\Omega ,\\Delta_t)" + }, + { + "bbox": [ + 106, + 88, + 504, + 355 + ], + "type": "text", + "content": " else if r.type " + }, + { + "bbox": [ + 106, + 88, + 504, + 355 + ], + "type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 106, + 88, + 504, + 355 + ], + "type": "text", + "content": " Causal then " + }, + { + "bbox": [ + 106, + 88, + 504, + 355 + ], + "type": "inline_equation", + "content": "C_g\\gets C_g + \\alpha \\gamma_{\\mathrm{causal}}\\cdot" + }, + { + "bbox": [ + 106, + 88, + 504, + 355 + ], + "type": "text", + "content": " CheckCausalRelationship(r, " + }, + { + "bbox": [ + 106, + 88, + 504, + 355 + ], + "type": "inline_equation", + "content": "\\Omega" + }, + { + "bbox": [ + 106, + 88, + 504, + 355 + ], + "type": "text", + "content": " ) else if r.type " + }, + { + "bbox": [ + 106, + 88, + 504, + 355 + ], + "type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 106, + 88, + 504, + 355 + ], + "type": "text", + "content": " Attribute then " + }, + { + "bbox": [ + 106, + 88, + 504, + 355 + ], + "type": "inline_equation", + "content": "C_g\\gets C_g + \\alpha \\gamma_{\\mathrm{attr}}\\cdot" + }, + { + "bbox": [ + 106, + 88, + 504, + 355 + ], + "type": "text", + "content": " CheckAttributeRelationship(r, " + }, + { + "bbox": [ + 106, + 88, + 504, + 355 + ], + "type": "inline_equation", + "content": "\\Omega [g],\\tau" + }, + { + "bbox": [ + 106, + 88, + 504, + 355 + ], + "type": "text", + "content": " UpdateScores(S,g,Cg); // Update global score registry DiffuseScores(S,w); // Temporal context propagation " + }, + { + "bbox": [ + 106, + 88, + 504, + 355 + ], + "type": "inline_equation", + "content": "P\\gets" + }, + { + "bbox": [ + 106, + 88, + 504, + 355 + ], + "type": "text", + "content": " NormalizeDistribution(S), " + }, 
+ { + "bbox": [ + 106, + 88, + 504, + 355 + ], + "type": "inline_equation", + "content": "B\\gets B - k^{2}" + }, + { + "bbox": [ + 106, + 88, + 504, + 355 + ], + "type": "text", + "content": " // Update sampling distribution foreach " + }, + { + "bbox": [ + 106, + 88, + 504, + 355 + ], + "type": "inline_equation", + "content": "g\\in \\operatorname {TopK}(S,K)" + }, + { + "bbox": [ + 106, + 88, + 504, + 355 + ], + "type": "text", + "content": " do if " + }, + { + "bbox": [ + 106, + 88, + 504, + 355 + ], + "type": "inline_equation", + "content": "\\Omega [g]\\cap \\mathcal{O}\\neq \\emptyset" + }, + { + "bbox": [ + 106, + 88, + 504, + 355 + ], + "type": "text", + "content": " then " + }, + { + "bbox": [ + 106, + 88, + 504, + 355 + ], + "type": "inline_equation", + "content": "\\begin{array}{rl}{\\mathcal{O}}&{\\leftarrow\\mathcal{O}\\backslash\\Omega[g]}\\end{array}" + }, + { + "bbox": [ + 106, + 88, + 504, + 355 + ], + "type": "text", + "content": " // Remove identified key objects \nreturn TopK(S,K); // Return top-K keyframes" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "code_body" + } + ], + "index": 1, + "sub_type": "algorithm" + }, + { + "bbox": [ + 104, + 387, + 504, + 410 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 387, + 504, + 410 + ], + "spans": [ + { + "bbox": [ + 104, + 387, + 504, + 410 + ], + "type": "text", + "content": "traditional linear search methods, it employs a probabilistic sampling strategy that dynamically adjusts based on confidence scores from multiple relationship types." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 426, + 504, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 426, + 504, + 449 + ], + "spans": [ + { + "bbox": [ + 104, + 426, + 504, + 449 + ], + "type": "text", + "content": "Initialization Phase The process begins by parsing the input query " + }, + { + "bbox": [ + 104, + 426, + 504, + 449 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 104, + 426, + 504, + 449 + ], + "type": "text", + "content": " into two fundamental components:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 453, + 492, + 477 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 105, + 453, + 297, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 453, + 297, + 464 + ], + "spans": [ + { + "bbox": [ + 105, + 453, + 297, + 464 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 105, + 453, + 297, + 464 + ], + "type": "inline_equation", + "content": "\\mathcal{O}" + }, + { + "bbox": [ + 105, + 453, + 297, + 464 + ], + "type": "text", + "content": ": A set of key objects or entities to identify" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 465, + 492, + 477 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 465, + 492, + 477 + ], + "spans": [ + { + "bbox": [ + 105, + 465, + 492, + 477 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 105, + 465, + 492, + 477 + ], + "type": "inline_equation", + "content": "\\mathcal{R}" + }, + { + "bbox": [ + 105, + 465, + 492, + 477 + ], + "type": "text", + "content": ": A collection of relationships (spatial, temporal, causal, and attribute) that must be satisfied" + } + ] + } + ], + "index": 5 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 480, + 506, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 480, + 506, + 525 + ], + "spans": [ + { + "bbox": [ + 
104, + 480, + 506, + 525 + ], + "type": "text", + "content": "The algorithm initializes with a uniform probability distribution " + }, + { + "bbox": [ + 104, + 480, + 506, + 525 + ], + "type": "inline_equation", + "content": "(P)" + }, + { + "bbox": [ + 104, + 480, + 506, + 525 + ], + "type": "text", + "content": " across all video frames, establishing a budget " + }, + { + "bbox": [ + 104, + 480, + 506, + 525 + ], + "type": "inline_equation", + "content": "(B)" + }, + { + "bbox": [ + 104, + 480, + 506, + 525 + ], + "type": "text", + "content": " equivalent to the total number of frames " + }, + { + "bbox": [ + 104, + 480, + 506, + 525 + ], + "type": "inline_equation", + "content": "(|V|)" + }, + { + "bbox": [ + 104, + 480, + 506, + 525 + ], + "type": "text", + "content": ", and creating an empty score registry " + }, + { + "bbox": [ + 104, + 480, + 506, + 525 + ], + "type": "inline_equation", + "content": "(S)" + }, + { + "bbox": [ + 104, + 480, + 506, + 525 + ], + "type": "text", + "content": " to track confidence values. This approach ensures unbiased initial exploration before evidence-guided refinement." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 542, + 506, + 599 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 542, + 506, + 599 + ], + "spans": [ + { + "bbox": [ + 104, + 542, + 506, + 599 + ], + "type": "text", + "content": "Adaptive Sampling Strategy Rather than exhaustively processing every frame, the algorithm employs a square-root scaling sampling strategy where " + }, + { + "bbox": [ + 104, + 542, + 506, + 599 + ], + "type": "inline_equation", + "content": "k = \\lfloor \\sqrt{B} \\rfloor" + }, + { + "bbox": [ + 104, + 542, + 506, + 599 + ], + "type": "text", + "content": " determines the sampling density. This provides a mathematical balance between exploration breadth and computational efficiency. The Grid function organizes sampled frames into a structured representation that preserves spatial-temporal relationships, facilitating subsequent relationship analysis." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 616, + 505, + 661 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 616, + 505, + 661 + ], + "spans": [ + { + "bbox": [ + 104, + 616, + 505, + 661 + ], + "type": "text", + "content": "Multi-modal Object Detection The DetectObjects function applies state-of-the-art computer vision techniques to identify objects within each sampled frame. This step leverages deep neural networks pre-trained on diverse visual datasets, enabling recognition of a wide range of entities with their corresponding confidence scores and spatial locations within frames." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 677, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 504, + 723 + ], + "type": "text", + "content": "Score Propagation and Distribution Update The DiffuseScores function implements a temporal context propagation mechanism that spreads confidence values to neighboring frames, acknowledging that relevant content likely extends beyond individual frames. This diffusion creates a smoothed confidence landscape that guides subsequent sampling." 
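As a minimal sketch of the scoring and diffusion steps just described (not the authors' code: the checker stubs, detection format, and the exponential falloff kernel are assumptions; only the update C_g <- C_g + alpha * gamma_type * check(r, Omega[g]) mirrors Algorithm 2):

```python
import math

# Hypothetical relationship verifiers; each would return a score in [0, 1].
# Real implementations would inspect bounding boxes, timestamps, etc.
CHECKERS = {
    "spatial":   lambda r, dets: 0.0,
    "temporal":  lambda r, dets: 0.0,
    "causal":    lambda r, dets: 0.0,
    "attribute": lambda r, dets: 0.0,
}
GAMMA = {"spatial": 1.0, "temporal": 1.0, "causal": 1.0, "attribute": 1.0}

def score_frame(detections, relations, alpha, gamma=GAMMA):
    """Base detection confidence plus weighted relationship bonuses."""
    c = sum(d["confidence"] for d in detections)      # CalculateBaseScore
    for r in relations:
        c += alpha * gamma[r["type"]] * CHECKERS[r["type"]](r, detections)
    return c

def diffuse_scores(scores, window=2):
    """Spread confidence to temporal neighbours (one reading of DiffuseScores)."""
    smoothed = list(scores)
    for i, s in enumerate(scores):
        lo, hi = max(0, i - window), min(len(scores), i + window + 1)
        for j in range(lo, hi):
            smoothed[j] = max(smoothed[j], s * math.exp(-abs(i - j)))
    return smoothed
```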
+ } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "type": "text", + "content": "After each iteration, the algorithm normalizes the accumulated scores to form an updated probability distribution, focusing future sampling on promising regions while maintaining exploration potential in unexamined areas." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 118, + 417, + 129 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 118, + 417, + 129 + ], + "spans": [ + { + "bbox": [ + 105, + 118, + 417, + 129 + ], + "type": "text", + "content": "Convergence Criteria and Termination The search continues until either:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 133, + 496, + 156 + ], + "type": "list", + "angle": 0, + "index": 4, + "blocks": [ + { + "bbox": [ + 105, + 133, + 496, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 133, + 496, + 144 + ], + "spans": [ + { + "bbox": [ + 105, + 133, + 496, + 144 + ], + "type": "text", + "content": "- The sampling budget " + }, + { + "bbox": [ + 105, + 133, + 496, + 144 + ], + "type": "inline_equation", + "content": "(B)" + }, + { + "bbox": [ + 105, + 133, + 496, + 144 + ], + "type": "text", + "content": " is exhausted, indicating comprehensive coverage of the video content" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 145, + 465, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 145, + 465, + 156 + ], + "spans": [ + { + "bbox": [ + 105, + 145, + 465, + 156 + ], + "type": "text", + "content": "- All target objects " + }, + { + "bbox": [ + 105, + 145, + 465, + 156 + ], + "type": "inline_equation", + "content": "(\\mathcal{O})" + }, + { + "bbox": [ + 105, + 145, + 465, + 156 + ], + "type": "text", + "content": " have been successfully identified at satisfactory confidence levels" + } + ] + } + ], + "index": 3 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 160, + 504, + 183 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 160, + 504, + 183 + ], + "spans": [ + { + "bbox": [ + 104, + 160, + 504, + 183 + ], + "type": "text", + "content": "This dual-termination approach balances thoroughness with efficiency, preventing unnecessary computation once objectives are met." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 194, + 504, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 194, + 504, + 239 + ], + "spans": [ + { + "bbox": [ + 104, + 194, + 504, + 239 + ], + "type": "text", + "content": "Result Generation The algorithm concludes by returning the top-K frames with the highest confidence scores, representing the most relevant video segments that satisfy the semantic-logical query requirements. These keyframes provide a concise summary of the content matching the user's information needs." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 251, + 268, + 264 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 251, + 268, + 264 + ], + "spans": [ + { + "bbox": [ + 105, + 251, + 268, + 264 + ], + "type": "text", + "content": "E.2 Implementation Considerations" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 273, + 403, + 285 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 273, + 403, + 285 + ], + "spans": [ + { + "bbox": [ + 105, + 273, + 403, + 285 + ], + "type": "text", + "content": "The algorithm's performance depends on several configurable parameters:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 287, + 324, + 335 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 105, + 287, + 324, + 299 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 287, + 324, + 299 + ], + "spans": [ + { + "bbox": [ + 105, + 287, + 324, + 299 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 105, + 287, + 324, + 299 + ], + "type": "inline_equation", + "content": "\\Delta_{t}" + }, + { + "bbox": [ + 105, + 287, + 324, + 299 + ], + "type": "text", + "content": ": Temporal window size for relationship analysis" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 300, + 304, + 311 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 300, + 304, + 311 + ], + "spans": [ + { + "bbox": [ + 106, + 300, + 304, + 311 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 106, + 300, + 304, + 311 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 106, + 300, + 304, + 311 + ], + "type": "text", + "content": ": Confidence threshold for attribute matching" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 312, + 271, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 312, + 271, + 323 + ], + "spans": [ + { + "bbox": [ + 106, + 312, + 271, + 323 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 106, + 312, + 271, + 323 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 106, + 312, + 271, + 323 + ], + "type": "text", + "content": ": Global relationship influence factor" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 323, + 266, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 323, + 266, + 335 + ], + "spans": [ + { + "bbox": [ + 106, + 323, + 266, + 335 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 106, + 323, + 266, + 335 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 106, + 323, + 266, + 335 + ], + "type": "text", + "content": ": Type-specific relationship weights" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 338, + 506, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 338, + 506, + 384 + ], + "spans": [ + { + "bbox": [ + 104, + 338, + 506, + 384 + ], + "type": "text", + "content": "These parameters can be tuned based on application requirements, video characteristics, and computational constraints. The algorithm's modular design allows for straightforward substitution of specific component implementations (e.g., different object detectors or relationship checkers) without altering the overall framework." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 396, + 288, + 408 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 396, + 288, + 408 + ], + "spans": [ + { + "bbox": [ + 105, + 396, + 288, + 408 + ], + "type": "text", + "content": "E.3 Computational Complexity Analysis" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 417, + 504, + 473 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 417, + 504, + 473 + ], + "spans": [ + { + "bbox": [ + 104, + 417, + 504, + 473 + ], + "type": "text", + "content": "The time complexity scales with " + }, + { + "bbox": [ + 104, + 417, + 504, + 473 + ], + "type": "inline_equation", + "content": "O(\\sqrt{N})" + }, + { + "bbox": [ + 104, + 417, + 504, + 473 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 104, + 417, + 504, + 473 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 104, + 417, + 504, + 473 + ], + "type": "text", + "content": " is the total number of frames, significantly improving upon linear approaches. Space complexity remains " + }, + { + "bbox": [ + 104, + 417, + 504, + 473 + ], + "type": "inline_equation", + "content": "O(N)" + }, + { + "bbox": [ + 104, + 417, + 504, + 473 + ], + "type": "text", + "content": " to maintain the probability distribution and score registry. The algorithm intelligently balances exploration and exploitation through its adaptive sampling approach, making it particularly suitable for large-scale video analysis tasks where exhaustive processing would be prohibitive." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 486, + 276, + 498 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 486, + 276, + 498 + ], + "spans": [ + { + "bbox": [ + 105, + 486, + 276, + 498 + ], + "type": "text", + "content": "E.4 Technical Implementation Details" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 506, + 504, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 506, + 504, + 540 + ], + "spans": [ + { + "bbox": [ + 104, + 506, + 504, + 540 + ], + "type": "text", + "content": "Object Detection and Feature Extraction To achieve real-time performance, the object detection module utilizes pre-trained deep convolutional neural network architectures, particularly variants based on FAST R-CNN andYOLO series. The system employs a two-stage detection strategy:" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 544, + 504, + 578 + ], + "type": "list", + "angle": 0, + "index": 21, + "blocks": [ + { + "bbox": [ + 105, + 544, + 467, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 544, + 467, + 555 + ], + "spans": [ + { + "bbox": [ + 105, + 544, + 467, + 555 + ], + "type": "text", + "content": "- Preliminary Detection: Using lightweight models to rapidly identify potential regions;" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 555, + 504, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 555, + 504, + 578 + ], + "spans": [ + { + "bbox": [ + 105, + 555, + 504, + 578 + ], + "type": "text", + "content": "- Fine-grained Classification: Applying more sophisticated models for detailed classification on high-confidence regions." 
+ } + ] + } + ], + "index": 20 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 582, + 506, + 627 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 582, + 506, + 627 + ], + "spans": [ + { + "bbox": [ + 104, + 582, + 506, + 627 + ], + "type": "text", + "content": "The feature extraction process leverages self-attention mechanisms from Visual Transformers (ViT), generating rich semantic embeddings robust to various visual variations such as scale, rotation, and illumination. Each identified object is associated with a feature vector " + }, + { + "bbox": [ + 104, + 582, + 506, + 627 + ], + "type": "inline_equation", + "content": "f_{i} \\in \\mathbb{R}^{d}" + }, + { + "bbox": [ + 104, + 582, + 506, + 627 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 582, + 506, + 627 + ], + "type": "inline_equation", + "content": "d = 512" + }, + { + "bbox": [ + 104, + 582, + 506, + 627 + ], + "type": "text", + "content": " represents the dimensionality of the embedding space." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 104, + 638, + 504, + 661 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 638, + 504, + 661 + ], + "spans": [ + { + "bbox": [ + 104, + 638, + 504, + 661 + ], + "type": "text", + "content": "Mathematical Formulations for Relationship Assessment The evaluation of various relationship types is based on precise mathematical definitions:" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 104, + 672, + 504, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 672, + 504, + 696 + ], + "spans": [ + { + "bbox": [ + 104, + 672, + 504, + 696 + ], + "type": "text", + "content": "Spatial Relationships Given bounding boxes " + }, + { + "bbox": [ + 104, + 672, + 504, + 696 + ], + "type": "inline_equation", + "content": "B_{i} = (x_{i},y_{i},w_{i},h_{i})" + }, + { + "bbox": [ + 104, + 672, + 504, + 696 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 672, + 504, + 696 + ], + "type": "inline_equation", + "content": "B_{j} = (x_{j},y_{j},w_{j},h_{j})" + }, + { + "bbox": [ + 104, + 672, + 504, + 696 + ], + "type": "text", + "content": " for two objects, the confidence for a spatial relationship " + }, + { + "bbox": [ + 104, + 672, + 504, + 696 + ], + "type": "inline_equation", + "content": "r_{\\text{spatial}}" + }, + { + "bbox": [ + 104, + 672, + 504, + 696 + ], + "type": "text", + "content": " is calculated as:" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 203, + 710, + 505, + 724 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 203, + 710, + 505, + 724 + ], + "spans": [ + { + "bbox": [ + 203, + 710, + 505, + 724 + ], + "type": "interline_equation", + "content": "C _ {\\text {s p a t i a l}} \\left(B _ {i}, B _ {j}, r\\right) = \\phi_ {r} \\left(B _ {i}, B _ {j}\\right) \\cdot \\psi \\left(B _ {i}\\right) \\cdot \\psi \\left(B _ {j}\\right), \\tag {12}", + "image_path": "fdc82addde7f6fa12197b2b446eeb2d46ee9ef9d934d08e9e4d05c2517b9e0f2.jpg" + } + ] + } + ], + "index": 25 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 26 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 506, + 95 + ], + 
"type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 506, + 95 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 506, + 95 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 72, + 506, + 95 + ], + "type": "inline_equation", + "content": "\\phi_r" + }, + { + "bbox": [ + 104, + 72, + 506, + 95 + ], + "type": "text", + "content": " is a relationship-specific compatibility function and " + }, + { + "bbox": [ + 104, + 72, + 506, + 95 + ], + "type": "inline_equation", + "content": "\\psi" + }, + { + "bbox": [ + 104, + 72, + 506, + 95 + ], + "type": "text", + "content": " is the object detection confidence. For example, the compatibility for a \"contains\" relationship is defined as:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 236, + 110, + 505, + 137 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 236, + 110, + 505, + 137 + ], + "spans": [ + { + "bbox": [ + 236, + 110, + 505, + 137 + ], + "type": "interline_equation", + "content": "\\phi_ {\\text {c o n t a i n s}} \\left(B _ {i}, B _ {j}\\right) = \\frac {\\operatorname {I o U} \\left(B _ {i} , B _ {j}\\right)}{\\operatorname {A r e a} \\left(B _ {j}\\right)}. \\tag {13}", + "image_path": "ccaa6d3ddd7d2546164640711ddc2250c2b915f71d03072139e00436c2ce2805.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 148, + 504, + 172 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 148, + 504, + 172 + ], + "spans": [ + { + "bbox": [ + 104, + 148, + 504, + 172 + ], + "type": "text", + "content": "Temporal Relationships Temporal relationships are calculated by evaluating object behavior patterns across a sequence of frames " + }, + { + "bbox": [ + 104, + 148, + 504, + 172 + ], + "type": "inline_equation", + "content": "\\{F_t, F_{t+1}, \\dots, F_{t+\\Delta_t}\\}" + }, + { + "bbox": [ + 104, + 148, + 504, + 172 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 199, + 186, + 505, + 220 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 199, + 186, + 505, + 220 + ], + "spans": [ + { + "bbox": [ + 199, + 186, + 505, + 220 + ], + "type": "interline_equation", + "content": "C _ {\\text {t e m p o r a l}} \\left(O _ {i}, O _ {j}, r, \\Delta_ {t}\\right) = \\prod_ {k = 0} ^ {\\Delta_ {t} - 1} T _ {r} \\left(O _ {i} ^ {t + k}, O _ {j} ^ {t + k + 1}\\right), \\tag {14}", + "image_path": "695a4ea89f1390a6314d0efb1c2f03296b29ca46622c4f759c167008eba24d34.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 230, + 504, + 253 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 230, + 504, + 253 + ], + "spans": [ + { + "bbox": [ + 104, + 230, + 504, + 253 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 230, + 504, + 253 + ], + "type": "inline_equation", + "content": "T_{r}" + }, + { + "bbox": [ + 104, + 230, + 504, + 253 + ], + "type": "text", + "content": " is a relationship-specific temporal transition matrix and " + }, + { + "bbox": [ + 104, + 230, + 504, + 253 + ], + "type": "inline_equation", + "content": "O_{i}^{t}" + }, + { + "bbox": [ + 104, + 230, + 504, + 253 + ], + "type": "text", + "content": " represents the state of object " + }, + { + "bbox": [ + 104, + 230, + 504, + 253 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 104, + 230, + 504, + 253 + ], + "type": "text", + "content": " at time " + }, + { + "bbox": [ + 104, + 230, + 504, + 253 + ], + "type": "inline_equation", + 
"content": "t" + }, + { + "bbox": [ + 104, + 230, + 504, + 253 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 266, + 504, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 266, + 504, + 289 + ], + "spans": [ + { + "bbox": [ + 104, + 266, + 504, + 289 + ], + "type": "text", + "content": "Causal Relationships Causal relationships utilize a Bayesian network framework to compute conditional probabilities:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 212, + 304, + 505, + 331 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 212, + 304, + 505, + 331 + ], + "spans": [ + { + "bbox": [ + 212, + 304, + 505, + 331 + ], + "type": "interline_equation", + "content": "C _ {\\text {c a u s a l}} \\left(E _ {i}, E _ {j}\\right) = P \\left(E _ {j} \\mid E _ {i}\\right) \\cdot \\log \\frac {P \\left(E _ {j} \\mid E _ {i}\\right)}{P \\left(E _ {j}\\right)}, \\tag {15}", + "image_path": "4a28afb109fe09584067f27ec21de26be56af1ca14d3475e8f6132495f5cdf7a.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 340, + 440, + 353 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 340, + 440, + 353 + ], + "spans": [ + { + "bbox": [ + 104, + 340, + 440, + 353 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 340, + 440, + 353 + ], + "type": "inline_equation", + "content": "E_{i}" + }, + { + "bbox": [ + 104, + 340, + 440, + 353 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 340, + 440, + 353 + ], + "type": "inline_equation", + "content": "E_{j}" + }, + { + "bbox": [ + 104, + 340, + 440, + 353 + ], + "type": "text", + "content": " represent the presumed cause event and effect event, respectively." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 365, + 504, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 365, + 504, + 388 + ], + "spans": [ + { + "bbox": [ + 104, + 365, + 504, + 388 + ], + "type": "text", + "content": "Attribute Relationships Attribute evaluation employs cosine similarity metrics between feature vectors and attribute prototypes:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 223, + 405, + 505, + 418 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 223, + 405, + 505, + 418 + ], + "spans": [ + { + "bbox": [ + 223, + 405, + 505, + 418 + ], + "type": "interline_equation", + "content": "C _ {\\text {a t t r}} \\left(O _ {i}, a\\right) = \\max \\left(0, \\cos \\left(f _ {i}, p _ {a}\\right) - \\tau\\right), \\tag {16}", + "image_path": "676bbf3304df1cb8bf6e1d45a0d6fb99a711d39f00a7dac0b26af20143b03b71.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 428, + 466, + 440 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 428, + 466, + 440 + ], + "spans": [ + { + "bbox": [ + 104, + 428, + 466, + 440 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 428, + 466, + 440 + ], + "type": "inline_equation", + "content": "p_a" + }, + { + "bbox": [ + 104, + 428, + 466, + 440 + ], + "type": "text", + "content": " is the prototype vector for attribute " + }, + { + "bbox": [ + 104, + 428, + 466, + 440 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 104, + 428, + 466, + 440 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 428, + 466, + 440 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 104, + 428, + 466, + 440 + ], + "type": "text", + "content": " is the minimum similarity threshold." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 453, + 504, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 453, + 504, + 475 + ], + "spans": [ + { + "bbox": [ + 104, + 453, + 504, + 475 + ], + "type": "text", + "content": "Score Propagation Algorithm Temporal score propagation is implemented through a weighted diffusion process, analogous to heat diffusion on a graph structure:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 233, + 490, + 505, + 517 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 233, + 490, + 505, + 517 + ], + "spans": [ + { + "bbox": [ + 233, + 490, + 505, + 517 + ], + "type": "interline_equation", + "content": "S ^ {\\prime} (t) = S (t) + \\sum_ {k \\in \\mathcal {N} (t)} w _ {k, t} \\cdot S (k), \\tag {17}", + "image_path": "a0ece509b574cfba939c9f0b341a7663e61092f677c80d6f88407e3aa6baeb1d.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 528, + 504, + 551 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 528, + 504, + 551 + ], + "spans": [ + { + "bbox": [ + 104, + 528, + 504, + 551 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 528, + 504, + 551 + ], + "type": "inline_equation", + "content": "\\mathcal{N}(t)" + }, + { + "bbox": [ + 104, + 528, + 504, + 551 + ], + "type": "text", + "content": " represents the temporal neighborhood of frame " + }, + { + "bbox": [ + 104, + 528, + 504, + 551 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 528, + 504, + 551 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 528, + 504, + 551 + ], + "type": "inline_equation", + "content": "w_{k,t}" + }, + { + "bbox": [ + 104, + 528, + 504, + 551 + ], + "type": "text", + "content": " is a weight based on temporal distance, defined as:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 249, + 565, + 505, + 594 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 249, + 565, + 505, + 594 + ], + "spans": [ + { + "bbox": [ + 249, + 565, + 505, + 594 + ], + "type": "interline_equation", + "content": "w _ {k, t} = \\exp \\left(- \\frac {\\left| k - t \\right| ^ {2}}{2 \\sigma^ {2}}\\right), \\tag {18}", + "image_path": "a968b2ea382466d089a06a218564465d838639afd95d1fa4a4ea5ed058508ec0.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 602, + 255, + 614 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 602, + 255, + 614 + ], + "spans": [ + { + "bbox": [ + 104, + 602, + 255, + 614 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 602, + 255, + 614 + ], + "type": "inline_equation", + "content": "\\sigma" + }, + { + "bbox": [ + 104, + 602, + 255, + 614 + ], + "type": "text", + "content": " controls the diffusion range." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 627, + 504, + 661 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 627, + 504, + 661 + ], + "spans": [ + { + "bbox": [ + 104, + 627, + 504, + 661 + ], + "type": "text", + "content": "Adaptive Sampling Optimization The sampling strategy is further improved through a dynamically adjusted Thompson sampling method, modeling the probability distribution " + }, + { + "bbox": [ + 104, + 627, + 504, + 661 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 104, + 627, + 504, + 661 + ], + "type": "text", + "content": " as a Beta distribution with shape parameters updated through previous observations:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 202, + 675, + 505, + 700 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 202, + 675, + 505, + 700 + ], + "spans": [ + { + "bbox": [ + 202, + 675, + 505, + 700 + ], + "type": "interline_equation", + "content": "P (t) \\sim \\operatorname {B e t a} \\left(\\alpha_ {t} + \\sum_ {i} S _ {i} (t), \\beta_ {t} + n - \\sum_ {i} S _ {i} (t)\\right), \\tag {19}", + "image_path": "b6b19c0e6bd22751c5f1e0df89f4485608192d639321a8a37dddaf8a1c2ea6bb.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 711, + 446, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 711, + 446, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 711, + 446, + 723 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 711, + 446, + 723 + ], + "type": "inline_equation", + "content": "\\alpha_{t}" + }, + { + "bbox": [ + 104, + 711, + 446, + 723 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 711, + 446, + 723 + ], + "type": "inline_equation", + "content": "\\beta_{t}" + }, + { + "bbox": [ + 104, + 711, + 446, + 723 + ], + "type": "text", + "content": " are prior hyperparameters and " + }, + { + "bbox": [ + 104, + 711, + 446, + 723 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 104, + 711, + 446, + 723 + ], + "type": "text", + "content": " is the total number of observations." 
+ } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 72, + 267, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 72, + 267, + 85 + ], + "spans": [ + { + "bbox": [ + 105, + 72, + 267, + 85 + ], + "type": "text", + "content": "E.5 Practical Application Examples" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 95, + 504, + 118 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 95, + 504, + 118 + ], + "spans": [ + { + "bbox": [ + 104, + 95, + 504, + 118 + ], + "type": "text", + "content": "In practical visual search scenarios, the algorithm processes complex queries such as \"a person wearing a blue shirt sits down at a table and then picks up a coffee cup\":" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 121, + 505, + 202 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 105, + 121, + 505, + 155 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 121, + 505, + 155 + ], + "spans": [ + { + "bbox": [ + 105, + 121, + 505, + 155 + ], + "type": "text", + "content": "- Query parsing identifies key objects (person, shirt, table, coffee cup) and relationships (blue attribute, sitting action, temporal before-after relation, spatial proximity);" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 156, + 373, + 166 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 156, + 373, + 166 + ], + "spans": [ + { + "bbox": [ + 106, + 156, + 373, + 166 + ], + "type": "text", + "content": "- Adaptive sampling selects representative frames from the video;" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 167, + 390, + 178 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 167, + 390, + 178 + ], + "spans": [ + { + "bbox": [ + 106, + 167, + 390, + 178 + ], + "type": "text", + "content": "- Multi-rerelationship evaluation integrates various sources of evidence;" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 179, + 464, + 190 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 179, + 464, + 190 + ], + "spans": [ + { + "bbox": [ + 106, + 179, + 464, + 190 + ], + "type": "text", + "content": "- Score propagation establishes a unified confidence landscape across related frame sets;" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 191, + 476, + 202 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 191, + 476, + 202 + ], + "spans": [ + { + "bbox": [ + 106, + 191, + 476, + 202 + ], + "type": "text", + "content": "- Result generation provides a concise summary of the most relevant segments in the video." 
+ } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 205, + 504, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 205, + 504, + 239 + ], + "spans": [ + { + "bbox": [ + 104, + 205, + 504, + 239 + ], + "type": "text", + "content": "This semantic-logical-temporal search framework represents a significant advancement in multimodal content retrieval, enabling natural language queries that incorporate complex relationships across objects, time, and causal chains." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 258, + 305, + 270 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 258, + 305, + 270 + ], + "spans": [ + { + "bbox": [ + 105, + 258, + 305, + 270 + ], + "type": "text", + "content": "E.6 System Specifications for Reproductivity" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 280, + 506, + 336 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 280, + 506, + 336 + ], + "spans": [ + { + "bbox": [ + 104, + 280, + 506, + 336 + ], + "type": "text", + "content": "Our experiments were conducted on high-performance servers, each equipped with either an Intel(R) Xeon(R) Platinum 8378A CPU @ 3.00GHz or an Intel(R) Xeon(R) Platinum 8358P CPU @ 2.60GHz, 1TB of RAM, and 4/6 NVIDIA A800 GPUs with 80GB memory. Machines with 4 GPUs are configured with the SXM4 version, while those with 6 GPUs use the PCIe version. The software environment included Python 3.11, PyTorch 2.4, and NCCL 2.21.5 for reactivity." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 357, + 329, + 371 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 357, + 329, + 371 + ], + "spans": [ + { + "bbox": [ + 105, + 357, + 329, + 371 + ], + "type": "text", + "content": "F Case Study of VSLS Keyframe Selection" + } + ] + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 147, + 398, + 462, + 571 + ], + "blocks": [ + { + "bbox": [ + 147, + 398, + 462, + 571 + ], + "lines": [ + { + "bbox": [ + 147, + 398, + 462, + 571 + ], + "spans": [ + { + "bbox": [ + 147, + 398, + 462, + 571 + ], + "type": "image", + "image_path": "94d01838d210618c918d4b0d5acc496b6d56923e612c4a95d392bb84b400ab9a.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 578, + 506, + 634 + ], + "lines": [ + { + "bbox": [ + 104, + 578, + 506, + 634 + ], + "spans": [ + { + "bbox": [ + 104, + 578, + 506, + 634 + ], + "type": "text", + "content": "Figure 6: Qualitative comparison of frame selection strategies demonstrates VSLS's ability to pinpoint query-critical moments (e.g., the subject presenting pink objects) with temporal precision, while baseline approaches exhibit color misinterpretation (brown) due to suboptimal frame choices. VSLS maintains superior temporal diversity and content relevance, effectively avoiding the redundant selections observed in comparative methods." + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 651, + 506, + 706 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 651, + 506, + 706 + ], + "spans": [ + { + "bbox": [ + 104, + 651, + 506, + 706 + ], + "type": "text", + "content": "As shown in Figure 6, the VSLS framework demonstrates its effectiveness through a video question-answering case study involving temporal handwriting analysis. 
The experiment focuses on distinguishing between two sequential events: a brown pen writing \"guitar\" at 2 seconds and a pink pen rewriting the same word at 3 seconds, with the query requiring identification of the second occurrence's pen color." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 711, + 382, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 711, + 382, + 723 + ], + "spans": [ + { + "bbox": [ + 105, + 711, + 382, + 723 + ], + "type": "text", + "content": "VSLS's analytical process unfolds through three interpretable phases:" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 72, + 505, + 174 + ], + "type": "list", + "angle": 0, + "index": 3, + "blocks": [ + { + "bbox": [ + 105, + 72, + 504, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 72, + 504, + 106 + ], + "spans": [ + { + "bbox": [ + 105, + 72, + 504, + 106 + ], + "type": "text", + "content": "- Semantic Logic Extraction: Identifies core visual entities (handwritten text, pen, paper) and constructs temporal relationships through triplet formulation: (text, time, pen), establishing the framework for tracking writing instrument changes;" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 106, + 505, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 106, + 505, + 140 + ], + "spans": [ + { + "bbox": [ + 106, + 106, + 505, + 140 + ], + "type": "text", + "content": "- Temporal Relevance Scoring: The gray relevance curve reveals precise temporal localization, with peak scores aligning perfectly with ground truth positions at 2s and 3s, contrasting sharply with baseline methods' random fluctuations;" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 140, + 504, + 174 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 140, + 504, + 174 + ], + "spans": [ + { + "bbox": [ + 106, + 140, + 504, + 174 + ], + "type": "text", + "content": "- Search Pattern Visualization: Demonstrates VSLS's focused inspection near critical moments versus uniform sampling's scattered temporal coverage, explaining the baseline's failure to detect the pink pen." 
+ } + ] + } + ], + "index": 2 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 105, + 177, + 414, + 189 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 177, + 414, + 189 + ], + "spans": [ + { + "bbox": [ + 105, + 177, + 414, + 189 + ], + "type": "text", + "content": "This case study yields two critical insights about VSLS's temporal reasoning:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 192, + 504, + 214 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 192, + 504, + 214 + ], + "spans": [ + { + "bbox": [ + 105, + 192, + 504, + 214 + ], + "type": "text", + "content": "- Sequential Event Disambiguation: The system successfully differentiates between near-identical visual events through:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 215, + 504, + 261 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 115, + 215, + 364, + 226 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 215, + 364, + 226 + ], + "spans": [ + { + "bbox": [ + 115, + 215, + 364, + 226 + ], + "type": "text", + "content": "- First writing instance: Brown pen detection(false positive);" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 115, + 227, + 363, + 238 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 227, + 363, + 238 + ], + "spans": [ + { + "bbox": [ + 115, + 227, + 363, + 238 + ], + "type": "text", + "content": "- Second writing instance: Pink pen detection(true positive)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 238, + 504, + 261 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 238, + 504, + 261 + ], + "spans": [ + { + "bbox": [ + 105, + 238, + 504, + 261 + ], + "type": "text", + "content": "- Explanation of answer generation disparity: VSLS produces the correct answer (\"Pink\") versus uniform sampling's erroneous baseline (\"Brown\") due to temporal reasoning failures." + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 264, + 505, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 264, + 505, + 308 + ], + "spans": [ + { + "bbox": [ + 104, + 264, + 505, + 308 + ], + "type": "text", + "content": "The spatial-temporal alignment between relevance peaks and ground truth positions confirms VSLS's unique capacity to synchronize semantic logic with visual evidence flow. This case particularly highlights the method's superiority in scenarios requiring precise discrimination of recurrent events with subtle visual variations." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 324, + 221, + 338 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 324, + 221, + 338 + ], + "spans": [ + { + "bbox": [ + 105, + 324, + 221, + 338 + ], + "type": "text", + "content": "G Iteration Analysis" + } + ] + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 211, + 354, + 402, + 450 + ], + "blocks": [ + { + "bbox": [ + 211, + 354, + 402, + 450 + ], + "lines": [ + { + "bbox": [ + 211, + 354, + 402, + 450 + ], + "spans": [ + { + "bbox": [ + 211, + 354, + 402, + 450 + ], + "type": "image", + "image_path": "a12bb641ca82972e0549859e62e742682b2319ef656a04a9604927b07e64f4ec.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 456, + 506, + 489 + ], + "lines": [ + { + "bbox": [ + 104, + 456, + 506, + 489 + ], + "spans": [ + { + "bbox": [ + 104, + 456, + 506, + 489 + ], + "type": "text", + "content": "Figure 7: The comparative visualization of iteration counts on the medium-length video subset of the VIDEO-MME dataset demonstrates that our method consistently requires a higher number of iterations compared to the T* approach." + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 501, + 505, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 501, + 505, + 536 + ], + "spans": [ + { + "bbox": [ + 104, + 501, + 505, + 536 + ], + "type": "text", + "content": "As shown in Fig 7, incorporating relations into the search algorithm will increase the average number of iterations for the video of medium length in the Video-MME dataset from 15.9 to 23.8. The overall distribution of video iteration will not be significantly changed." + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "type": "text", + "content": "22" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 71, + 170, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 71, + 170, + 85 + ], + "spans": [ + { + "bbox": [ + 105, + 71, + 170, + 85 + ], + "type": "text", + "content": "H Prompt" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 93, + 303, + 106 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 93, + 303, + 106 + ], + "spans": [ + { + "bbox": [ + 105, + 93, + 303, + 106 + ], + "type": "text", + "content": "H.1 Prompt Template for Query Grounding" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 110, + 303, + 124 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 110, + 303, + 124 + ], + "spans": [ + { + "bbox": [ + 105, + 110, + 303, + 124 + ], + "type": "text", + "content": "Here is the prompt we used for query grounding." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 121, + 130, + 280, + 144 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 130, + 280, + 144 + ], + "spans": [ + { + "bbox": [ + 121, + 130, + 280, + 144 + ], + "type": "text", + "content": "Prompt Template for Query Grounding" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 121, + 152, + 337, + 164 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 152, + 337, + 164 + ], + "spans": [ + { + "bbox": [ + 121, + 152, + 337, + 164 + ], + "type": "text", + "content": "Analyze the following video frames and the question:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 121, + 164, + 218, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 164, + 218, + 175 + ], + "spans": [ + { + "bbox": [ + 121, + 164, + 218, + 175 + ], + "type": "text", + "content": "Question: " + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 121, + 175, + 208, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 175, + 208, + 185 + ], + "spans": [ + { + "bbox": [ + 121, + 175, + 208, + 185 + ], + "type": "text", + "content": "Options: <0options>" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 121, + 186, + 257, + 196 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 186, + 257, + 196 + ], + "spans": [ + { + "bbox": [ + 121, + 186, + 257, + 196 + ], + "type": "text", + "content": "Step 1: Key Object Identification" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 196, + 380, + 230 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 132, + 196, + 357, + 207 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 196, + 357, + 207 + ], + "spans": [ + { + "bbox": [ + 132, + 196, + 357, + 207 + ], + "type": "text", + "content": "- Extract 3-5 core objects detectable by computer vision" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 208, + 380, + 219 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 208, + 380, + 219 + ], + "spans": [ + { + "bbox": [ + 132, + 208, + 380, + 219 + ], + "type": "text", + "content": "- Use YOLO-compatible noun phrases (e.g., \"person\", \"mic\")" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 132, + 219, + 301, + 230 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 219, + 301, + 230 + ], + "spans": [ + { + "bbox": [ + 132, + 219, + 301, + 230 + ], + "type": "text", + "content": "- Format: Key Objects: obj1, obj2, obj3" + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 121, + 230, + 222, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 230, + 222, + 239 + ], + "spans": [ + { + "bbox": [ + 121, + 230, + 222, + 239 + ], + "type": "text", + "content": "Step 2: Contextual Cues" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 132, + 240, + 451, + 273 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 132, + 240, + 451, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 240, + 451, + 251 + ], + "spans": [ + { + "bbox": [ + 132, + 240, + 451, + 251 + ], + "type": "text", + "content": "- List 2-4 scene elements that help locate key objects based on options provided" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 132, + 251, + 324, + 262 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 251, + 324, + 262 + ], + 
"spans": [ + { + "bbox": [ + 132, + 251, + 324, + 262 + ], + "type": "text", + "content": "- Use detectable items (avoid abstract concepts)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 132, + 262, + 301, + 273 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 262, + 301, + 273 + ], + "spans": [ + { + "bbox": [ + 132, + 262, + 301, + 273 + ], + "type": "text", + "content": "- Format: Cue Objects: cue1, cue2, cue3" + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 121, + 273, + 239, + 284 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 273, + 239, + 284 + ], + "spans": [ + { + "bbox": [ + 121, + 273, + 239, + 284 + ], + "type": "text", + "content": "Step 3: Relationship Triplets" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 132, + 285, + 216, + 295 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 285, + 216, + 295 + ], + "spans": [ + { + "bbox": [ + 132, + 285, + 216, + 295 + ], + "type": "text", + "content": "- Relationship types:" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 142, + 296, + 432, + 338 + ], + "type": "list", + "angle": 0, + "index": 23, + "blocks": [ + { + "bbox": [ + 142, + 296, + 338, + 306 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 142, + 296, + 338, + 306 + ], + "spans": [ + { + "bbox": [ + 142, + 296, + 338, + 306 + ], + "type": "text", + "content": "- Spatial: Objects must appear in the same frame" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 142, + 306, + 432, + 317 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 142, + 306, + 432, + 317 + ], + "spans": [ + { + "bbox": [ + 142, + 306, + 432, + 317 + ], + "type": "text", + "content": "- Attribute: Color/size/material descriptions (e.g., \"red clothes\", \"large\")" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 142, + 317, + 367, + 327 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 142, + 317, + 367, + 327 + ], + "spans": [ + { + "bbox": [ + 142, + 317, + 367, + 327 + ], + "type": "text", + "content": "- Time: Appear in different frames within a few seconds" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 142, + 327, + 364, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 142, + 327, + 364, + 338 + ], + "spans": [ + { + "bbox": [ + 142, + 327, + 364, + 338 + ], + "type": "text", + "content": "- Causal: There is a temporal order between the objects" + } + ] + } + ], + "index": 22 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 121, + 338, + 488, + 360 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 338, + 488, + 360 + ], + "spans": [ + { + "bbox": [ + 121, + 338, + 488, + 360 + ], + "type": "text", + "content": "- Format of Relations: (object, relation_type, object), relation_type should be exactly one of spatial/attribute/time/causal" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 121, + 361, + 181, + 371 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 361, + 181, + 371 + ], + "spans": [ + { + "bbox": [ + 121, + 361, + 181, + 371 + ], + "type": "text", + "content": "Output Rules" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 122, + 371, + 488, + 426 + ], + "type": "list", + "angle": 0, + "index": 30, + "blocks": [ + { + "bbox": [ + 132, + 371, + 443, + 382 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 371, + 443, + 382 + ], + "spans": [ + { + "bbox": [ + 132, + 371, + 
443, + 382 + ], + "type": "text", + "content": "1. One line each for Key Objects/Cue Objects/Rel starting with exact prefixes" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 132, + 382, + 488, + 393 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 382, + 488, + 393 + ], + "spans": [ + { + "bbox": [ + 132, + 382, + 488, + 393 + ], + "type": "text", + "content": "2. Separate items with comma except for triplets where items are separated by semicolon" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 132, + 393, + 361, + 404 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 393, + 361, + 404 + ], + "spans": [ + { + "bbox": [ + 132, + 393, + 361, + 404 + ], + "type": "text", + "content": "3. Never use markdown or natural language explanations" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 122, + 404, + 488, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 404, + 488, + 426 + ], + "spans": [ + { + "bbox": [ + 122, + 404, + 488, + 426 + ], + "type": "text", + "content": "4. If you cannot identify any key objects or cue objects from the video provided, please just identify the possible key or cue objects from the question and options provided" + } + ] + } + ], + "index": 29 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 121, + 426, + 285, + 437 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 426, + 285, + 437 + ], + "spans": [ + { + "bbox": [ + 121, + 426, + 285, + 437 + ], + "type": "text", + "content": "Below is an example of the procedure:" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 132, + 437, + 430, + 448 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 437, + 430, + 448 + ], + "spans": [ + { + "bbox": [ + 132, + 437, + 430, + 448 + ], + "type": "text", + "content": "Question: For \"When does the person in red clothes appear with the dog?\"" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 132, + 448, + 174, + 458 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 448, + 174, + 458 + ], + "spans": [ + { + "bbox": [ + 132, + 448, + 174, + 458 + ], + "type": "text", + "content": "Response:" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 142, + 459, + 449, + 491 + ], + "type": "list", + "angle": 0, + "index": 37, + "blocks": [ + { + "bbox": [ + 142, + 459, + 324, + 470 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 142, + 459, + 324, + 470 + ], + "spans": [ + { + "bbox": [ + 142, + 459, + 324, + 470 + ], + "type": "text", + "content": "Key Objects: person, dog, red clothes" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 142, + 470, + 328, + 480 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 142, + 470, + 328, + 480 + ], + "spans": [ + { + "bbox": [ + 142, + 470, + 328, + 480 + ], + "type": "text", + "content": "Cue Objects: grassy_area, leash, fence" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 142, + 481, + 449, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 142, + 481, + 449, + 491 + ], + "spans": [ + { + "bbox": [ + 142, + 481, + 449, + 491 + ], + "type": "text", + "content": "Rel: (person; attribute; red clothes), (person; spatial; dog)" + } + ] + } + ], + "index": 36 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 121, + 491, + 362, + 502 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 491, + 362, + 502 + ], + "spans": [ + { + "bbox": [ + 121, + 491, + 362, + 502 + ], 
+ "type": "text", + "content": "Format your response EXACTLY like this in three lines:" + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 142, + 502, + 324, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 142, + 502, + 324, + 514 + ], + "spans": [ + { + "bbox": [ + 142, + 502, + 324, + 514 + ], + "type": "text", + "content": "Key Objects: object1, object2, object" + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 142, + 514, + 323, + 524 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 142, + 514, + 323, + 524 + ], + "spans": [ + { + "bbox": [ + 142, + 514, + 323, + 524 + ], + "type": "text", + "content": "Cue Objects: object1, object2, object" + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 122, + 525, + 488, + 546 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 525, + 488, + 546 + ], + "spans": [ + { + "bbox": [ + 122, + 525, + 488, + 546 + ], + "type": "text", + "content": "Rel: (object1; relation_type1; object2), (object3; relation_type2; object4)" + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 105, + 568, + 313, + 582 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 568, + 313, + 582 + ], + "spans": [ + { + "bbox": [ + 105, + 568, + 313, + 582 + ], + "type": "text", + "content": "H.2 Prompt Template for Question Answering" + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 105, + 586, + 313, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 586, + 313, + 597 + ], + "spans": [ + { + "bbox": [ + 105, + 586, + 313, + 597 + ], + "type": "text", + "content": "Here is the prompt we used for question answering." + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 121, + 605, + 291, + 618 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 605, + 291, + 618 + ], + "spans": [ + { + "bbox": [ + 121, + 605, + 291, + 618 + ], + "type": "text", + "content": "Prompt Template for Question Answering" + } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 121, + 627, + 459, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 627, + 459, + 639 + ], + "spans": [ + { + "bbox": [ + 121, + 627, + 459, + 639 + ], + "type": "text", + "content": "Select the best answer to the following multiple-choice question based on the video." + } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 121, + 639, + 159, + 649 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 639, + 159, + 649 + ], + "spans": [ + { + "bbox": [ + 121, + 639, + 159, + 649 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 121, + 651, + 159, + 661 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 651, + 159, + 661 + ], + "spans": [ + { + "bbox": [ + 121, + 651, + 159, + 661 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 47 + }, + { + "bbox": [ + 121, + 663, + 135, + 668 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 663, + 135, + 668 + ], + "spans": [ + { + "bbox": [ + 121, + 663, + 135, + 668 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 48 + }, + { + "bbox": [ + 121, + 671, + 217, + 681 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 671, + 217, + 681 + ], + "spans": [ + { + "bbox": [ + 121, + 671, + 217, + 681 + ], + "type": "text", + "content": "Question: " + } + ] + } + ], + "index": 49 + }, + { + "bbox": [ + 121, + 682, + 208, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 682, + 208, + 693 + ], + "spans": [ + { + "bbox": [ + 121, + 682, + 208, + 693 + ], + "type": "text", + "content": "Options: <0options>" + } + ] + } + ], + "index": 50 + }, + { + "bbox": [ + 121, + 693, + 374, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 693, + 374, + 704 + ], + "spans": [ + { + "bbox": [ + 121, + 693, + 374, + 704 + ], + "type": "text", + "content": "Answer with the option's letter from the given choices directly." + } + ] + } + ], + "index": 51 + }, + { + "bbox": [ + 121, + 704, + 418, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 704, + 418, + 715 + ], + "spans": [ + { + "bbox": [ + 121, + 704, + 418, + 715 + ], + "type": "text", + "content": "Your response format should be strictly an upper case letter A,B,C,D or E." + } + ] + } + ], + "index": 52 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "23" + } + ] + } + ], + "index": 53 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 22 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 71, + 185, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 71, + 185, + 83 + ], + "spans": [ + { + "bbox": [ + 105, + 71, + 185, + 83 + ], + "type": "text", + "content": "I Limitations" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 95, + 506, + 174 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 95, + 506, + 174 + ], + "spans": [ + { + "bbox": [ + 104, + 95, + 506, + 174 + ], + "type": "text", + "content": "Despite the promising results of our VSLS framework, we acknowledge several limitations: First, although our approach reduces the required frame sampling to just " + }, + { + "bbox": [ + 104, + 95, + 506, + 174 + ], + "type": "inline_equation", + "content": "1.4\\%" + }, + { + "bbox": [ + 104, + 95, + 506, + 174 + ], + "type": "text", + "content": ", the computational complexity remains a consideration for extremely long videos, with a search overhead of approximately 7.8 seconds. This may present challenges for real-time or low-latency applications. Besides, the performance of VSLS is bounded by the capabilities of the underlying object detector (YOLO-WORLD). Detection accuracy may degrade under challenging visual conditions such as poor lighting, occlusion, or unusual camera angles, potentially affecting temporal coverage." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 188, + 214, + 202 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 188, + 214, + 202 + ], + "spans": [ + { + "bbox": [ + 105, + 188, + 214, + 202 + ], + "type": "text", + "content": "J Broader Impacts" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 213, + 504, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 213, + 504, + 236 + ], + "spans": [ + { + "bbox": [ + 104, + 213, + 504, + 236 + ], + "type": "text", + "content": "Our Visual Semantic-Logical Search (VSLS) framework primarily offers positive societal impacts as a foundational algorithm for efficient keyframe selection in long videos." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 247, + 201, + 260 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 247, + 201, + 260 + ], + "spans": [ + { + "bbox": [ + 105, + 247, + 201, + 260 + ], + "type": "text", + "content": "J.1 Positive Impacts" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 264, + 505, + 375 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 106, + 264, + 505, + 285 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 264, + 505, + 285 + ], + "spans": [ + { + "bbox": [ + 106, + 264, + 505, + 285 + ], + "type": "text", + "content": "- Educational Applications: VSLS enables students and educators to quickly locate relevant segments in instructional videos, improving learning efficiency for visual content." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 287, + 504, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 287, + 504, + 308 + ], + "spans": [ + { + "bbox": [ + 106, + 287, + 504, + 308 + ], + "type": "text", + "content": "- Research Enhancement: Scientists across disciplines can benefit from more efficient analysis of video archives, particularly those studying behavioral patterns or analyzing historical footage." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 310, + 504, + 342 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 310, + 504, + 342 + ], + "spans": [ + { + "bbox": [ + 106, + 310, + 504, + 342 + ], + "type": "text", + "content": "- Computational Efficiency: By sampling only " + }, + { + "bbox": [ + 106, + 310, + 504, + 342 + ], + "type": "inline_equation", + "content": "1.4\\%" + }, + { + "bbox": [ + 106, + 310, + 504, + 342 + ], + "type": "text", + "content": " of frames on average, our approach reduces computational requirements and energy consumption, contributing to more sustainable AI applications." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 344, + 504, + 375 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 344, + 504, + 375 + ], + "spans": [ + { + "bbox": [ + 106, + 344, + 504, + 375 + ], + "type": "text", + "content": "- Accessibility: Our framework can be integrated into assistive technologies for individuals with cognitive processing challenges, helping them identify and focus on critical moments in video content." 
+ } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 105, + 390, + 235, + 402 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 390, + 235, + 402 + ], + "spans": [ + { + "bbox": [ + 105, + 390, + 235, + 402 + ], + "type": "text", + "content": "J.2 Potential Considerations" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 410, + 504, + 433 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 410, + 504, + 433 + ], + "spans": [ + { + "bbox": [ + 104, + 410, + 504, + 433 + ], + "type": "text", + "content": "As a foundational algorithm, VSLS has limited direct negative impacts. However, like any computer vision technology, applications built upon it should be mindful of general considerations:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 436, + 506, + 471 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 436, + 506, + 471 + ], + "spans": [ + { + "bbox": [ + 105, + 436, + 506, + 471 + ], + "type": "text", + "content": "- Underlying Model Biases: The performance of VSLS depends partly on object detection systems (e.g.,YOLO-World), so it inherits any limitations or biases present in these components. Our modular design allows for substitution with improved detection systems as they become available." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 740, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 740, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 740, + 311, + 750 + ], + "type": "text", + "content": "24" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 23 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 71, + 238, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 71, + 238, + 85 + ], + "spans": [ + { + "bbox": [ + 105, + 71, + 238, + 85 + ], + "type": "text", + "content": "NeurIPS Paper Checklist" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 129, + 92, + 175, + 103 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 92, + 175, + 103 + ], + "spans": [ + { + "bbox": [ + 129, + 92, + 175, + 103 + ], + "type": "text", + "content": "1. Claims" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 140, + 107, + 504, + 130 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 107, + 504, + 130 + ], + "spans": [ + { + "bbox": [ + 140, + 107, + 504, + 130 + ], + "type": "text", + "content": "Question: Do the main claims made in the abstract and introduction accurately reflect the paper's contributions and scope?" 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 141, + 134, + 202, + 145 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 134, + 202, + 145 + ], + "spans": [ + { + "bbox": [ + 141, + 134, + 202, + 145 + ], + "type": "text", + "content": "Answer: [Yes]" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 140, + 148, + 506, + 237 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 148, + 506, + 237 + ], + "spans": [ + { + "bbox": [ + 140, + 148, + 506, + 237 + ], + "type": "text", + "content": "Justification: The abstract and introduction clearly state the main contributions of our work, including (1) the proposal of a semantics-driven keyframe search framework using four logical relations, (2) performance gains on multiple long video QA benchmarks, (3) efficient frame sampling " + }, + { + "bbox": [ + 140, + 148, + 506, + 237 + ], + "type": "inline_equation", + "content": "(1.4\\%)" + }, + { + "bbox": [ + 140, + 148, + 506, + 237 + ], + "type": "text", + "content": " with state-of-the-art results, and (4) plug-and-play compatibility with VLM/LLM pipelines. These claims are supported by both the method and experimental sections (see Sections \"Introduction\", \"Method\", and \"Experiment\"), and limitations are discussed in the main paper and Appendix I. The claims are fully aligned with the presented theoretical and empirical results." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 141, + 240, + 189, + 251 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 240, + 189, + 251 + ], + "spans": [ + { + "bbox": [ + 141, + 240, + 189, + 251 + ], + "type": "text", + "content": "Guidelines:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 141, + 253, + 504, + 354 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 141, + 253, + 504, + 275 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 253, + 504, + 275 + ], + "spans": [ + { + "bbox": [ + 141, + 253, + 504, + 275 + ], + "type": "text", + "content": "- The answer NA means that the abstract and introduction do not include the claims made in the paper." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 141, + 275, + 504, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 275, + 504, + 308 + ], + "spans": [ + { + "bbox": [ + 141, + 275, + 504, + 308 + ], + "type": "text", + "content": "- The abstract and/or introduction should clearly state the claims made, including the contributions made in the paper and important assumptions and limitations. A No or NA answer to this question will not be perceived well by the reviewers." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 141, + 309, + 504, + 331 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 309, + 504, + 331 + ], + "spans": [ + { + "bbox": [ + 141, + 309, + 504, + 331 + ], + "type": "text", + "content": "- The claims made should match theoretical and experimental results, and reflect how much the results can be expected to generalize to other settings." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 141, + 332, + 504, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 332, + 504, + 354 + ], + "spans": [ + { + "bbox": [ + 141, + 332, + 504, + 354 + ], + "type": "text", + "content": "- It is fine to include aspirational goals as motivation as long as it is clear that these goals are not attained by the paper." 
+ } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 129, + 358, + 194, + 369 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 358, + 194, + 369 + ], + "spans": [ + { + "bbox": [ + 129, + 358, + 194, + 369 + ], + "type": "text", + "content": "2. Limitations" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 140, + 373, + 492, + 385 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 373, + 492, + 385 + ], + "spans": [ + { + "bbox": [ + 140, + 373, + 492, + 385 + ], + "type": "text", + "content": "Question: Does the paper discuss the limitations of the work performed by the authors?" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 141, + 388, + 202, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 388, + 202, + 399 + ], + "spans": [ + { + "bbox": [ + 141, + 388, + 202, + 399 + ], + "type": "text", + "content": "Answer: [Yes]" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 140, + 403, + 382, + 415 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 403, + 382, + 415 + ], + "spans": [ + { + "bbox": [ + 140, + 403, + 382, + 415 + ], + "type": "text", + "content": "Justification: The paper discusses limitations in Appendix I." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 141, + 419, + 189, + 430 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 419, + 189, + 430 + ], + "spans": [ + { + "bbox": [ + 141, + 419, + 189, + 430 + ], + "type": "text", + "content": "Guidelines:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 141, + 431, + 504, + 723 + ], + "type": "list", + "angle": 0, + "index": 24, + "blocks": [ + { + "bbox": [ + 141, + 431, + 504, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 431, + 504, + 453 + ], + "spans": [ + { + "bbox": [ + 141, + 431, + 504, + 453 + ], + "type": "text", + "content": "- The answer NA means that the paper has no limitation while the answer No means that the paper has limitations, but those are not discussed in the paper." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 141, + 454, + 486, + 465 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 454, + 486, + 465 + ], + "spans": [ + { + "bbox": [ + 141, + 454, + 486, + 465 + ], + "type": "text", + "content": "- The authors are encouraged to create a separate \"Limitations\" section in their paper." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 141, + 466, + 504, + 520 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 466, + 504, + 520 + ], + "spans": [ + { + "bbox": [ + 141, + 466, + 504, + 520 + ], + "type": "text", + "content": "- The paper should point out any strong assumptions and how robust the results are to violations of these assumptions (e.g., independence assumptions, noiseless settings, model well-specification, asymptotic approximations only holding locally). The authors should reflect on how these assumptions might be violated in practice and what the implications would be." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 141, + 521, + 504, + 553 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 521, + 504, + 553 + ], + "spans": [ + { + "bbox": [ + 141, + 521, + 504, + 553 + ], + "type": "text", + "content": "- The authors should reflect on the scope of the claims made, e.g., if the approach was only tested on a few datasets or with a few runs. 
In general, empirical results often depend on implicit assumptions, which should be articulated." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 141, + 555, + 504, + 610 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 555, + 504, + 610 + ], + "spans": [ + { + "bbox": [ + 141, + 555, + 504, + 610 + ], + "type": "text", + "content": "- The authors should reflect on the factors that influence the performance of the approach. For example, a facial recognition algorithm may perform poorly when image resolution is low or images are taken in low lighting. Or a speech-to-text system might not be used reliably to provide closed captions for online lectures because it fails to handle technical jargon." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 141, + 611, + 504, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 611, + 504, + 632 + ], + "spans": [ + { + "bbox": [ + 141, + 611, + 504, + 632 + ], + "type": "text", + "content": "- The authors should discuss the computational efficiency of the proposed algorithms and how they scale with dataset size." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 141, + 633, + 504, + 655 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 633, + 504, + 655 + ], + "spans": [ + { + "bbox": [ + 141, + 633, + 504, + 655 + ], + "type": "text", + "content": "- If applicable, the authors should discuss possible limitations of their approach to address problems of privacy and fairness." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 141, + 656, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 656, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 141, + 656, + 504, + 723 + ], + "type": "text", + "content": "- While the authors might fear that complete honesty about limitations might be used by reviewers as grounds for rejection, a worse outcome might be that reviewers discover limitations that aren't acknowledged in the paper. The authors should use their best judgment and recognize that individual actions in favor of transparency play an important role in developing norms that preserve the integrity of the community. Reviewers will be specifically instructed to not penalize honesty concerning limitations." + } + ] + } + ], + "index": 23 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "25" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 24 + }, + { + "para_blocks": [ + { + "bbox": [ + 128, + 72, + 279, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 128, + 72, + 279, + 84 + ], + "spans": [ + { + "bbox": [ + 128, + 72, + 279, + 84 + ], + "type": "text", + "content": "3. Theory assumptions and proofs" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 140, + 88, + 504, + 111 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 88, + 504, + 111 + ], + "spans": [ + { + "bbox": [ + 140, + 88, + 504, + 111 + ], + "type": "text", + "content": "Question: For each theoretical result, does the paper provide the full set of assumptions and a complete (and correct) proof?" 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 140, + 115, + 202, + 126 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 115, + 202, + 126 + ], + "spans": [ + { + "bbox": [ + 140, + 115, + 202, + 126 + ], + "type": "text", + "content": "Answer: [NA]" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 140, + 131, + 506, + 176 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 131, + 506, + 176 + ], + "spans": [ + { + "bbox": [ + 140, + 131, + 506, + 176 + ], + "type": "text", + "content": "Justification: The paper does not include formal theoretical results, theorems, or proofs. Our work is primarily methodological and experimental; all mathematical formulations are used to describe the algorithm and its components, but no formal theorems are claimed or proved. Therefore, this item is not applicable." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 140, + 180, + 189, + 190 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 180, + 189, + 190 + ], + "spans": [ + { + "bbox": [ + 140, + 180, + 189, + 190 + ], + "type": "text", + "content": "Guidelines:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 141, + 193, + 505, + 307 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 141, + 193, + 443, + 203 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 193, + 443, + 203 + ], + "spans": [ + { + "bbox": [ + 141, + 193, + 443, + 203 + ], + "type": "text", + "content": "- The answer NA means that the paper does not include theoretical results." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 141, + 205, + 505, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 205, + 505, + 224 + ], + "spans": [ + { + "bbox": [ + 141, + 205, + 505, + 224 + ], + "type": "text", + "content": "- All the theorems, formulas, and proofs in the paper should be numbered and cross-referenced." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 141, + 227, + 503, + 238 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 227, + 503, + 238 + ], + "spans": [ + { + "bbox": [ + 141, + 227, + 503, + 238 + ], + "type": "text", + "content": "- All assumptions should be clearly stated or referenced in the statement of any theorems." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 141, + 239, + 504, + 272 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 239, + 504, + 272 + ], + "spans": [ + { + "bbox": [ + 141, + 239, + 504, + 272 + ], + "type": "text", + "content": "- The proofs can either appear in the main paper or the supplemental material, but if they appear in the supplemental material, the authors are encouraged to provide a short proof sketch to provide intuition." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 141, + 273, + 503, + 295 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 273, + 503, + 295 + ], + "spans": [ + { + "bbox": [ + 141, + 273, + 503, + 295 + ], + "type": "text", + "content": "- Inversely, any informal proof provided in the core of the paper should be complemented by formal proofs provided in appendix or supplemental material." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 141, + 296, + 473, + 307 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 296, + 473, + 307 + ], + "spans": [ + { + "bbox": [ + 141, + 296, + 473, + 307 + ], + "type": "text", + "content": "- Theorems and Lemmas that the proof relies upon should be properly referenced." + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 128, + 312, + 296, + 323 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 128, + 312, + 296, + 323 + ], + "spans": [ + { + "bbox": [ + 128, + 312, + 296, + 323 + ], + "type": "text", + "content": "4. Experimental result reproducibility" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 139, + 327, + 506, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 139, + 327, + 506, + 361 + ], + "spans": [ + { + "bbox": [ + 139, + 327, + 506, + 361 + ], + "type": "text", + "content": "Question: Does the paper fully disclose all the information needed to reproduce the main experimental results of the paper to the extent that it affects the main claims and/or conclusions of the paper (regardless of whether the code and data are provided or not)?" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 140, + 365, + 202, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 365, + 202, + 376 + ], + "spans": [ + { + "bbox": [ + 140, + 365, + 202, + 376 + ], + "type": "text", + "content": "Answer: [Yes]" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 139, + 380, + 506, + 459 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 139, + 380, + 506, + 459 + ], + "spans": [ + { + "bbox": [ + 139, + 380, + 506, + 459 + ], + "type": "text", + "content": "Justification: The paper provides comprehensive details required for reproducibility, including descriptions of all datasets used (see Section \"Details of Datasets\" and Appendix D), implementation details of the proposed algorithm (see \"Method\" and \"Algorithm Overview\"), hyperparameter choices, prompt templates (Appendix \"Prompt\"), and evaluation protocols for each experiment. We also specify the object detection models and baselines used, and state that the code will be publicly released. This level of detail allows other researchers to replicate the main experiments and validate our claims." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 140, + 463, + 190, + 473 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 463, + 190, + 473 + ], + "spans": [ + { + "bbox": [ + 140, + 463, + 190, + 473 + ], + "type": "text", + "content": "Guidelines:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 141, + 475, + 504, + 543 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 141, + 475, + 421, + 486 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 475, + 421, + 486 + ], + "spans": [ + { + "bbox": [ + 141, + 475, + 421, + 486 + ], + "type": "text", + "content": "- The answer NA means that the paper does not include experiments." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 141, + 487, + 504, + 520 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 487, + 504, + 520 + ], + "spans": [ + { + "bbox": [ + 141, + 487, + 504, + 520 + ], + "type": "text", + "content": "- If the paper includes experiments, a No answer to this question will not be perceived well by the reviewers: Making the paper reproducible is important, regardless of whether the code and data are provided or not." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 141, + 521, + 504, + 543 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 521, + 504, + 543 + ], + "spans": [ + { + "bbox": [ + 141, + 521, + 504, + 543 + ], + "type": "text", + "content": "- If the contribution is a dataset and/or model, the authors should describe the steps taken to make their results reproducible or verifiable." + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 141, + 544, + 505, + 642 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 544, + 505, + 642 + ], + "spans": [ + { + "bbox": [ + 141, + 544, + 505, + 642 + ], + "type": "text", + "content": "- Depending on the contribution, reproducibility can be accomplished in various ways. For example, if the contribution is a novel architecture, describing the architecture fully might suffice, or if the contribution is a specific model and empirical evaluation, it may be necessary to either make it possible for others to replicate the model with the same dataset, or provide access to the model. In general, releasing code and data is often one good way to accomplish this, but reproducibility can also be provided via detailed instructions for how to replicate the results, access to a hosted model (e.g., in the case of a large language model), releasing of a model checkpoint, or other means that are appropriate to the research performed." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 141, + 643, + 505, + 675 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 643, + 505, + 675 + ], + "spans": [ + { + "bbox": [ + 141, + 643, + 505, + 675 + ], + "type": "text", + "content": "- While NeurIPS does not require releasing code, the conference does require all submissions to provide some reasonable avenue for reproducibility, which may depend on the nature of the contribution. For example" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 148, + 677, + 504, + 722 + ], + "type": "list", + "angle": 0, + "index": 25, + "blocks": [ + { + "bbox": [ + 148, + 677, + 504, + 699 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 148, + 677, + 504, + 699 + ], + "spans": [ + { + "bbox": [ + 148, + 677, + 504, + 699 + ], + "type": "text", + "content": "(a) If the contribution is primarily a new algorithm, the paper should make it clear how to reproduce that algorithm." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 148, + 700, + 504, + 722 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 148, + 700, + 504, + 722 + ], + "spans": [ + { + "bbox": [ + 148, + 700, + 504, + 722 + ], + "type": "text", + "content": "(b) If the contribution is primarily a new model architecture, the paper should describe the architecture clearly and fully." 
+ } + ] + } + ], + "index": 24 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "26" + } + ] + } + ], + "index": 26 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 25 + }, + { + "para_blocks": [ + { + "bbox": [ + 149, + 72, + 504, + 173 + ], + "type": "list", + "angle": 0, + "index": 2, + "blocks": [ + { + "bbox": [ + 149, + 72, + 504, + 116 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 72, + 504, + 116 + ], + "spans": [ + { + "bbox": [ + 149, + 72, + 504, + 116 + ], + "type": "text", + "content": "(c) If the contribution is a new model (e.g., a large language model), then there should either be a way to access this model for reproducing the results or a way to reproduce the model (e.g., with an open-source dataset or instructions for how to construct the dataset)." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 149, + 117, + 504, + 173 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 117, + 504, + 173 + ], + "spans": [ + { + "bbox": [ + 149, + 117, + 504, + 173 + ], + "type": "text", + "content": "(d) We recognize that reproducibility may be tricky in some cases, in which case authors are welcome to describe the particular way they provide for reproducibility. In the case of closed-source models, it may be that access to the model is limited in some way (e.g., to registered users), but it should be possible for other researchers to have some path to reproducing or verifying the results." + } + ] + } + ], + "index": 1 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 128, + 175, + 269, + 186 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 128, + 175, + 269, + 186 + ], + "spans": [ + { + "bbox": [ + 128, + 175, + 269, + 186 + ], + "type": "text", + "content": "5. Open access to data and code" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 140, + 190, + 506, + 223 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 190, + 506, + 223 + ], + "spans": [ + { + "bbox": [ + 140, + 190, + 506, + 223 + ], + "type": "text", + "content": "Question: Does the paper provide open access to the data and code, with sufficient instructions to faithfully reproduce the main experimental results, as described in supplemental material?" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 140, + 227, + 202, + 238 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 227, + 202, + 238 + ], + "spans": [ + { + "bbox": [ + 140, + 227, + 202, + 238 + ], + "type": "text", + "content": "Answer: [Yes]" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 140, + 241, + 506, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 241, + 506, + 308 + ], + "spans": [ + { + "bbox": [ + 140, + 241, + 506, + 308 + ], + "type": "text", + "content": "Justification: We state in the abstract and main text that the code will be publicly released. All datasets used in our experiments are from public benchmarks (LONGVIDEOBENCH,VIDEO-MME, HAYSTACK-LVBENCH, EGO4D), and details for data access are provided in Appendix D. Instructions for running our framework, data preparation, and experiment replication will be included in the released code repository. 
Thus, researchers will be able to access both code and data with clear instructions for full reproducibility." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 140, + 311, + 190, + 321 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 311, + 190, + 321 + ], + "spans": [ + { + "bbox": [ + 140, + 311, + 190, + 321 + ], + "type": "text", + "content": "Guidelines:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 141, + 323, + 504, + 538 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 141, + 323, + 467, + 334 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 323, + 467, + 334 + ], + "spans": [ + { + "bbox": [ + 141, + 323, + 467, + 334 + ], + "type": "text", + "content": "- The answer NA means that paper does not include experiments requiring code." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 141, + 335, + 504, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 335, + 504, + 357 + ], + "spans": [ + { + "bbox": [ + 141, + 335, + 504, + 357 + ], + "type": "text", + "content": "- Please see the NeurIPS code and data submission guidelines (https://nips.cc/public/guides/CodeSubmissionPolicy) for more details." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 141, + 357, + 504, + 401 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 357, + 504, + 401 + ], + "spans": [ + { + "bbox": [ + 141, + 357, + 504, + 401 + ], + "type": "text", + "content": "- While we encourage the release of code and data, we understand that this might not be possible, so \"No\" is an acceptable answer. Papers cannot be rejected simply for not including code, unless this is central to the contribution (e.g., for a new open-source benchmark)." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 141, + 403, + 504, + 435 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 403, + 504, + 435 + ], + "spans": [ + { + "bbox": [ + 141, + 403, + 504, + 435 + ], + "type": "text", + "content": "- The instructions should contain the exact command and environment needed to run to reproduce the results. See the NeurIPS code and data submission guidelines (https://nips.cc/public/guides/CodeSubmissionPolicy) for more details." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 141, + 436, + 504, + 458 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 436, + 504, + 458 + ], + "spans": [ + { + "bbox": [ + 141, + 436, + 504, + 458 + ], + "type": "text", + "content": "- The authors should provide instructions on data access and preparation, including how to access the raw data, preprocessed data, intermediate data, and generated data, etc." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 141, + 459, + 504, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 459, + 504, + 491 + ], + "spans": [ + { + "bbox": [ + 141, + 459, + 504, + 491 + ], + "type": "text", + "content": "- The authors should provide scripts to reproduce all experimental results for the new proposed method and baselines. If only a subset of experiments are reproducible, they should state which ones are omitted from the script and why." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 141, + 492, + 504, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 492, + 504, + 514 + ], + "spans": [ + { + "bbox": [ + 141, + 492, + 504, + 514 + ], + "type": "text", + "content": "- At submission time, to preserve anonymity, the authors should release anonymized versions (if applicable)." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 141, + 515, + 504, + 538 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 515, + 504, + 538 + ], + "spans": [ + { + "bbox": [ + 141, + 515, + 504, + 538 + ], + "type": "text", + "content": "- Providing as much information as possible in supplemental material (appended to the paper) is recommended, but including URLs to data and code is permitted." + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 128, + 540, + 264, + 552 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 128, + 540, + 264, + 552 + ], + "spans": [ + { + "bbox": [ + 128, + 540, + 264, + 552 + ], + "type": "text", + "content": "6. Experimental setting/details" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 140, + 555, + 506, + 588 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 555, + 506, + 588 + ], + "spans": [ + { + "bbox": [ + 140, + 555, + 506, + 588 + ], + "type": "text", + "content": "Question: Does the paper specify all the training and test details (e.g., data splits, hyperparameters, how they were chosen, type of optimizer, etc.) necessary to understand the results?" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 140, + 592, + 202, + 604 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 592, + 202, + 604 + ], + "spans": [ + { + "bbox": [ + 140, + 592, + 202, + 604 + ], + "type": "text", + "content": "Answer: [Yes]" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 140, + 606, + 506, + 672 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 606, + 506, + 672 + ], + "spans": [ + { + "bbox": [ + 140, + 606, + 506, + 672 + ], + "type": "text", + "content": "Justification: The paper specifies all relevant experimental details, including descriptions of dataset splits, hyperparameters, evaluation metrics, and prompt templates (see \"Experiment,\" Table captions, and Appendix D). As our method is training-free, we clarify in the main text which components rely on pre-trained models and explicitly describe all parameter settings for reproducibility. This ensures that readers can fully understand and interpret the reported results." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 140, + 676, + 190, + 686 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 676, + 190, + 686 + ], + "spans": [ + { + "bbox": [ + 140, + 676, + 190, + 686 + ], + "type": "text", + "content": "Guidelines:" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 141, + 688, + 504, + 723 + ], + "type": "list", + "angle": 0, + "index": 24, + "blocks": [ + { + "bbox": [ + 141, + 688, + 421, + 699 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 688, + 421, + 699 + ], + "spans": [ + { + "bbox": [ + 141, + 688, + 421, + 699 + ], + "type": "text", + "content": "- The answer NA means that the paper does not include experiments." 
+ } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 141, + 700, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 700, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 141, + 700, + 504, + 723 + ], + "type": "text", + "content": "- The experimental setting should be presented in the core of the paper to a level of detail that is necessary to appreciate the results and make sense of them." + } + ] + } + ], + "index": 23 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "27" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 26 + }, + { + "para_blocks": [ + { + "bbox": [ + 141, + 72, + 504, + 94 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 72, + 504, + 94 + ], + "spans": [ + { + "bbox": [ + 141, + 72, + 504, + 94 + ], + "type": "text", + "content": "- The full details can be provided either with the code, in appendix, or as supplemental material." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 129, + 99, + 289, + 111 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 99, + 289, + 111 + ], + "spans": [ + { + "bbox": [ + 129, + 99, + 289, + 111 + ], + "type": "text", + "content": "7. Experiment statistical significance" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 140, + 115, + 504, + 138 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 115, + 504, + 138 + ], + "spans": [ + { + "bbox": [ + 140, + 115, + 504, + 138 + ], + "type": "text", + "content": "Question: Does the paper report error bars suitably and correctly defined or other appropriate information about the statistical significance of the experiments?" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 140, + 143, + 200, + 154 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 143, + 200, + 154 + ], + "spans": [ + { + "bbox": [ + 140, + 143, + 200, + 154 + ], + "type": "text", + "content": "Answer: [No]" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 140, + 158, + 504, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 158, + 504, + 224 + ], + "spans": [ + { + "bbox": [ + 140, + 158, + 504, + 224 + ], + "type": "text", + "content": "Justification: The paper does not report error bars or formal statistical significance tests for the main experimental results, as our approach is deterministic and uses fixed dataset splits and pre-trained models. Metrics are reported as single values following common practice in recent long video QA benchmarks. While this is standard in the area, we acknowledge that including error bars or additional significance analysis would further strengthen the experimental evaluation." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 140, + 229, + 190, + 239 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 229, + 190, + 239 + ], + "spans": [ + { + "bbox": [ + 140, + 229, + 190, + 239 + ], + "type": "text", + "content": "Guidelines:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 141, + 242, + 504, + 468 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 141, + 242, + 422, + 253 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 242, + 422, + 253 + ], + "spans": [ + { + "bbox": [ + 141, + 242, + 422, + 253 + ], + "type": "text", + "content": "- The answer NA means that the paper does not include experiments." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 141, + 254, + 504, + 287 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 254, + 504, + 287 + ], + "spans": [ + { + "bbox": [ + 141, + 254, + 504, + 287 + ], + "type": "text", + "content": "- The authors should answer \"Yes\" if the results are accompanied by error bars, confidence intervals, or statistical significance tests, at least for the experiments that support the main claims of the paper." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 141, + 288, + 504, + 320 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 288, + 504, + 320 + ], + "spans": [ + { + "bbox": [ + 141, + 288, + 504, + 320 + ], + "type": "text", + "content": "- The factors of variability that the error bars are capturing should be clearly stated (for example, train/test split, initialization, random drawing of some parameter, or overall run with given experimental conditions)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 141, + 321, + 504, + 343 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 321, + 504, + 343 + ], + "spans": [ + { + "bbox": [ + 141, + 321, + 504, + 343 + ], + "type": "text", + "content": "- The method for calculating the error bars should be explained (closed form formula, call to a library function, bootstrap, etc.)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 141, + 344, + 449, + 355 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 344, + 449, + 355 + ], + "spans": [ + { + "bbox": [ + 141, + 344, + 449, + 355 + ], + "type": "text", + "content": "- The assumptions made should be given (e.g., Normally distributed errors)." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 141, + 356, + 504, + 377 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 356, + 504, + 377 + ], + "spans": [ + { + "bbox": [ + 141, + 356, + 504, + 377 + ], + "type": "text", + "content": "- It should be clear whether the error bar is the standard deviation or the standard error of the mean." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 141, + 379, + 504, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 379, + 504, + 411 + ], + "spans": [ + { + "bbox": [ + 141, + 379, + 504, + 411 + ], + "type": "text", + "content": "- It is OK to report 1-sigma error bars, but one should state it. The authors should preferably report a 2-sigma error bar than state that they have a " + }, + { + "bbox": [ + 141, + 379, + 504, + 411 + ], + "type": "inline_equation", + "content": "96\\%" + }, + { + "bbox": [ + 141, + 379, + 504, + 411 + ], + "type": "text", + "content": " CI, if the hypothesis of Normality of errors is not verified." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 141, + 412, + 504, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 412, + 504, + 445 + ], + "spans": [ + { + "bbox": [ + 141, + 412, + 504, + 445 + ], + "type": "text", + "content": "- For asymmetric distributions, the authors should be careful not to show in tables or figures symmetric error bars that would yield results that are out of range (e.g. negative error rates)." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 141, + 446, + 504, + 468 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 446, + 504, + 468 + ], + "spans": [ + { + "bbox": [ + 141, + 446, + 504, + 468 + ], + "type": "text", + "content": "- If error bars are reported in tables or plots, The authors should explain in the text how they were calculated and reference the corresponding figures or tables in the text." + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 129, + 473, + 280, + 485 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 473, + 280, + 485 + ], + "spans": [ + { + "bbox": [ + 129, + 473, + 280, + 485 + ], + "type": "text", + "content": "8. Experiments compute resources" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 140, + 489, + 505, + 522 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 489, + 505, + 522 + ], + "spans": [ + { + "bbox": [ + 140, + 489, + 505, + 522 + ], + "type": "text", + "content": "Question: For each experiment, does the paper provide sufficient information on the computer resources (type of compute workers, memory, time of execution) needed to reproduce the experiments?" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 140, + 527, + 202, + 539 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 527, + 202, + 539 + ], + "spans": [ + { + "bbox": [ + 140, + 527, + 202, + 539 + ], + "type": "text", + "content": "Answer: [Yes]" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 140, + 543, + 504, + 598 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 543, + 504, + 598 + ], + "spans": [ + { + "bbox": [ + 140, + 543, + 504, + 598 + ], + "type": "text", + "content": "Justification: The paper specifies the computing environment in Appendix E.6, and reports both latency and FLOPs for major baselines and our method in Table 1. We also provide the number of iterations, average processing time, and model sizes in the main text and tables. This information is sufficient for others to estimate compute requirements and reproduce the experiments." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 140, + 602, + 190, + 613 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 602, + 190, + 613 + ], + "spans": [ + { + "bbox": [ + 140, + 602, + 190, + 613 + ], + "type": "text", + "content": "Guidelines:" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 141, + 615, + 504, + 706 + ], + "type": "list", + "angle": 0, + "index": 25, + "blocks": [ + { + "bbox": [ + 141, + 615, + 422, + 627 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 615, + 422, + 627 + ], + "spans": [ + { + "bbox": [ + 141, + 615, + 422, + 627 + ], + "type": "text", + "content": "- The answer NA means that the paper does not include experiments." 
+ } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 141, + 628, + 504, + 649 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 628, + 504, + 649 + ], + "spans": [ + { + "bbox": [ + 141, + 628, + 504, + 649 + ], + "type": "text", + "content": "- The paper should indicate the type of compute workers CPU or GPU, internal cluster, or cloud provider, including relevant memory and storage." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 141, + 650, + 504, + 672 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 650, + 504, + 672 + ], + "spans": [ + { + "bbox": [ + 141, + 650, + 504, + 672 + ], + "type": "text", + "content": "- The paper should provide the amount of compute required for each of the individual experimental runs as well as estimate the total compute." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 141, + 673, + 504, + 706 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 673, + 504, + 706 + ], + "spans": [ + { + "bbox": [ + 141, + 673, + 504, + 706 + ], + "type": "text", + "content": "- The paper should disclose whether the full research project required more compute than the experiments reported in the paper (e.g., preliminary or failed experiments that didn't make it into the paper)." + } + ] + } + ], + "index": 24 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 129, + 711, + 204, + 721 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 711, + 204, + 721 + ], + "spans": [ + { + "bbox": [ + 129, + 711, + 204, + 721 + ], + "type": "text", + "content": "9. Code of ethics" + } + ] + } + ], + "index": 26 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "28" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 27 + }, + { + "para_blocks": [ + { + "bbox": [ + 140, + 72, + 504, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 72, + 504, + 95 + ], + "spans": [ + { + "bbox": [ + 140, + 72, + 504, + 95 + ], + "type": "text", + "content": "Question: Does the research conducted in the paper conform, in every respect, with the NeurIPS Code of Ethics https://neurips.cc/public/EthicsGuidelines?" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 140, + 99, + 202, + 110 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 99, + 202, + 110 + ], + "spans": [ + { + "bbox": [ + 140, + 99, + 202, + 110 + ], + "type": "text", + "content": "Answer: [Yes]" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 139, + 114, + 506, + 170 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 139, + 114, + 506, + 170 + ], + "spans": [ + { + "bbox": [ + 139, + 114, + 506, + 170 + ], + "type": "text", + "content": "Justification: The research follows the NeurIPS Code of Ethics. All datasets used are publicly available, appropriately licensed, and include human annotation with proper privacy safeguards (see Appendix D). No personally identifiable information or sensitive data is used. The proposed methods and experiments present no foreseeable risk of harm, discrimination, or privacy violation. Anonymity is preserved in all supplementary materials." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 140, + 172, + 189, + 182 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 172, + 189, + 182 + ], + "spans": [ + { + "bbox": [ + 140, + 172, + 189, + 182 + ], + "type": "text", + "content": "Guidelines:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 141, + 184, + 504, + 241 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 141, + 184, + 496, + 194 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 184, + 496, + 194 + ], + "spans": [ + { + "bbox": [ + 141, + 184, + 496, + 194 + ], + "type": "text", + "content": "- The answer NA means that the authors have not reviewed the NeurIPS Code of Ethics." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 141, + 196, + 504, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 196, + 504, + 217 + ], + "spans": [ + { + "bbox": [ + 141, + 196, + 504, + 217 + ], + "type": "text", + "content": "- If the authors answer No, they should explain the special circumstances that require a deviation from the Code of Ethics." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 141, + 219, + 504, + 241 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 219, + 504, + 241 + ], + "spans": [ + { + "bbox": [ + 141, + 219, + 504, + 241 + ], + "type": "text", + "content": "- The authors should make sure to preserve anonymity (e.g., if there is a special consideration due to laws or regulations in their jurisdiction)." + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 124, + 244, + 216, + 256 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 124, + 244, + 216, + 256 + ], + "spans": [ + { + "bbox": [ + 124, + 244, + 216, + 256 + ], + "type": "text", + "content": "10. Broader impacts" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 140, + 260, + 504, + 283 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 260, + 504, + 283 + ], + "spans": [ + { + "bbox": [ + 140, + 260, + 504, + 283 + ], + "type": "text", + "content": "Question: Does the paper discuss both potential positive societal impacts and negative societal impacts of the work performed?" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 140, + 285, + 202, + 297 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 285, + 202, + 297 + ], + "spans": [ + { + "bbox": [ + 140, + 285, + 202, + 297 + ], + "type": "text", + "content": "Answer: [Yes]" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 140, + 300, + 405, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 300, + 405, + 312 + ], + "spans": [ + { + "bbox": [ + 140, + 300, + 405, + 312 + ], + "type": "text", + "content": "Justification: Our paper discusses broader impacts in Appendix J." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 140, + 316, + 190, + 326 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 316, + 190, + 326 + ], + "spans": [ + { + "bbox": [ + 140, + 316, + 190, + 326 + ], + "type": "text", + "content": "Guidelines:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 141, + 328, + 504, + 573 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 141, + 328, + 462, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 328, + 462, + 338 + ], + "spans": [ + { + "bbox": [ + 141, + 328, + 462, + 338 + ], + "type": "text", + "content": "- The answer NA means that there is no societal impact of the work performed." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 141, + 340, + 504, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 340, + 504, + 361 + ], + "spans": [ + { + "bbox": [ + 141, + 340, + 504, + 361 + ], + "type": "text", + "content": "- If the authors answer NA or No, they should explain why their work has no societal impact or why the paper does not address societal impact." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 141, + 363, + 504, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 363, + 504, + 407 + ], + "spans": [ + { + "bbox": [ + 141, + 363, + 504, + 407 + ], + "type": "text", + "content": "- Examples of negative societal impacts include potential malicious or unintended uses (e.g., disinformation, generating fake profiles, surveillance), fairness considerations (e.g., deployment of technologies that could make decisions that unfairly impact specific groups), privacy considerations, and security considerations." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 141, + 407, + 504, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 407, + 504, + 483 + ], + "spans": [ + { + "bbox": [ + 141, + 407, + 504, + 483 + ], + "type": "text", + "content": "- The conference expects that many papers will be foundational research and not tied to particular applications, let alone deployments. However, if there is a direct path to any negative applications, the authors should point it out. For example, it is legitimate to point out that an improvement in the quality of generative models could be used to generate deepfakes for disinformation. On the other hand, it is not needed to point out that a generic algorithm for optimizing neural networks could enable people to train models that generate Deepfakes faster." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 141, + 485, + 504, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 485, + 504, + 529 + ], + "spans": [ + { + "bbox": [ + 141, + 485, + 504, + 529 + ], + "type": "text", + "content": "- The authors should consider possible harms that could arise when the technology is being used as intended and functioning correctly, harms that could arise when the technology is being used as intended but gives incorrect results, and harms following from (intentional or unintentional) misuse of the technology." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 141, + 529, + 504, + 573 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 529, + 504, + 573 + ], + "spans": [ + { + "bbox": [ + 141, + 529, + 504, + 573 + ], + "type": "text", + "content": "- If there are negative societal impacts, the authors could also discuss possible mitigation strategies (e.g., gated release of models, providing defenses in addition to attacks, mechanisms for monitoring misuse, mechanisms to monitor how a system learns from feedback over time, improving the efficiency and accessibility of ML)." + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 124, + 577, + 192, + 588 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 124, + 577, + 192, + 588 + ], + "spans": [ + { + "bbox": [ + 124, + 577, + 192, + 588 + ], + "type": "text", + "content": "11. Safeguards" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 140, + 592, + 505, + 626 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 592, + 505, + 626 + ], + "spans": [ + { + "bbox": [ + 140, + 592, + 505, + 626 + ], + "type": "text", + "content": "Question: Does the paper describe safeguards that have been put in place for responsible release of data or models that have a high risk for misuse (e.g., pretrained language models, image generators, or scraped datasets)?" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 140, + 629, + 202, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 629, + 202, + 640 + ], + "spans": [ + { + "bbox": [ + 140, + 629, + 202, + 640 + ], + "type": "text", + "content": "Answer: [NA]" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 140, + 643, + 504, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 643, + 504, + 710 + ], + "spans": [ + { + "bbox": [ + 140, + 643, + 504, + 710 + ], + "type": "text", + "content": "Justification: Our work introduces a semantic-logical search framework for keyframe selection that builds upon existing object detection models and benchmarks. It does not release new datasets scraped from the internet or high-risk generative models. While our method improves video understanding capabilities, it doesn't introduce fundamentally new capabilities that would require specific safeguards beyond those already in place for the underlying technologies (such as YOLO-World) that we utilize." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 141, + 711, + 382, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 711, + 382, + 723 + ], + "spans": [ + { + "bbox": [ + 141, + 711, + 382, + 723 + ], + "type": "text", + "content": "- The answer NA means that the paper poses no such risks." 
+ } + ] + } + ], + "index": 24 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "29" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 28 + }, + { + "para_blocks": [ + { + "bbox": [ + 141, + 72, + 504, + 173 + ], + "type": "list", + "angle": 0, + "index": 3, + "blocks": [ + { + "bbox": [ + 141, + 72, + 504, + 116 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 72, + 504, + 116 + ], + "spans": [ + { + "bbox": [ + 141, + 72, + 504, + 116 + ], + "type": "text", + "content": "- Released models that have a high risk for misuse or dual-use should be released with necessary safeguards to allow for controlled use of the model, for example by requiring that users adhere to usage guidelines or restrictions to access the model or implementing safety filters." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 141, + 118, + 504, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 118, + 504, + 140 + ], + "spans": [ + { + "bbox": [ + 141, + 118, + 504, + 140 + ], + "type": "text", + "content": "- Datasets that have been scraped from the Internet could pose safety risks. The authors should describe how they avoided releasing unsafe images." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 141, + 140, + 504, + 173 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 140, + 504, + 173 + ], + "spans": [ + { + "bbox": [ + 141, + 140, + 504, + 173 + ], + "type": "text", + "content": "- We recognize that providing effective safeguards is challenging, and many papers do not require this, but we encourage authors to take this into account and make a best faith effort." + } + ] + } + ], + "index": 2 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 124, + 177, + 257, + 190 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 124, + 177, + 257, + 190 + ], + "spans": [ + { + "bbox": [ + 124, + 177, + 257, + 190 + ], + "type": "text", + "content": "12. Licenses for existing assets" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 140, + 193, + 504, + 227 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 193, + 504, + 227 + ], + "spans": [ + { + "bbox": [ + 140, + 193, + 504, + 227 + ], + "type": "text", + "content": "Question: Are the creators or original owners of assets (e.g., code, data, models), used in the paper, properly credited and are the license and terms of use explicitly mentioned and properly respected?" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 141, + 230, + 201, + 242 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 230, + 201, + 242 + ], + "spans": [ + { + "bbox": [ + 141, + 230, + 201, + 242 + ], + "type": "text", + "content": "Answer: [NA]" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 140, + 245, + 504, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 245, + 504, + 312 + ], + "spans": [ + { + "bbox": [ + 140, + 245, + 504, + 312 + ], + "type": "text", + "content": "Justification: Our work introduces a semantic-logical search framework for keyframe selection that builds upon existing object detection models and benchmarks. It does not release new datasets scraped from the internet or high-risk generative models. 
While our method improves video understanding capabilities, it doesn't introduce fundamentally new capabilities that would require specific safeguards beyond those already in place for the underlying technologies (such as YOLO-World) that we utilize." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 141, + 316, + 189, + 326 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 316, + 189, + 326 + ], + "spans": [ + { + "bbox": [ + 141, + 316, + 189, + 326 + ], + "type": "text", + "content": "Guidelines:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 141, + 328, + 504, + 498 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 141, + 328, + 414, + 339 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 328, + 414, + 339 + ], + "spans": [ + { + "bbox": [ + 141, + 328, + 414, + 339 + ], + "type": "text", + "content": "- The answer NA means that the paper does not use existing assets." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 141, + 340, + 490, + 351 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 340, + 490, + 351 + ], + "spans": [ + { + "bbox": [ + 141, + 340, + 490, + 351 + ], + "type": "text", + "content": "- The authors should cite the original paper that produced the code package or dataset." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 141, + 352, + 504, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 352, + 504, + 373 + ], + "spans": [ + { + "bbox": [ + 141, + 352, + 504, + 373 + ], + "type": "text", + "content": "- The authors should state which version of the asset is used and, if possible, include a URL." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 141, + 374, + 459, + 385 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 374, + 459, + 385 + ], + "spans": [ + { + "bbox": [ + 141, + 374, + 459, + 385 + ], + "type": "text", + "content": "- The name of the license (e.g., CC-BY 4.0) should be included for each asset." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 141, + 387, + 504, + 408 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 387, + 504, + 408 + ], + "spans": [ + { + "bbox": [ + 141, + 387, + 504, + 408 + ], + "type": "text", + "content": "- For scraped data from a particular source (e.g., website), the copyright and terms of service of that source should be provided." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 141, + 410, + 504, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 410, + 504, + 453 + ], + "spans": [ + { + "bbox": [ + 141, + 410, + 504, + 453 + ], + "type": "text", + "content": "- If assets are released, the license, copyright information, and terms of use in the package should be provided. For popular datasets, paperswithcode.com/datasets has curated licenses for some datasets. Their licensing guide can help determine the license of a dataset." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 141, + 454, + 504, + 476 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 454, + 504, + 476 + ], + "spans": [ + { + "bbox": [ + 141, + 454, + 504, + 476 + ], + "type": "text", + "content": "- For existing datasets that are re-packaged, both the original license and the license of the derived asset (if it has changed) should be provided." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 141, + 477, + 504, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 477, + 504, + 498 + ], + "spans": [ + { + "bbox": [ + 141, + 477, + 504, + 498 + ], + "type": "text", + "content": "- If this information is not available online, the authors are encouraged to reach out to the asset's creators." + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 124, + 503, + 190, + 514 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 124, + 503, + 190, + 514 + ], + "spans": [ + { + "bbox": [ + 124, + 503, + 190, + 514 + ], + "type": "text", + "content": "13. New assets" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 140, + 518, + 504, + 542 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 518, + 504, + 542 + ], + "spans": [ + { + "bbox": [ + 140, + 518, + 504, + 542 + ], + "type": "text", + "content": "Question: Are new assets introduced in the paper well documented and is the documentation provided alongside the assets?" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 141, + 545, + 201, + 557 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 545, + 201, + 557 + ], + "spans": [ + { + "bbox": [ + 141, + 545, + 201, + 557 + ], + "type": "text", + "content": "Answer: [Yes]" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 140, + 561, + 506, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 561, + 506, + 639 + ], + "spans": [ + { + "bbox": [ + 140, + 561, + 506, + 639 + ], + "type": "text", + "content": "Justification: We will release code for our VSLS framework upon publication, as mentioned in the abstract. The code will be accompanied by comprehensive documentation detailing the implementation of our four logical dependencies (spatial, temporal, attribute, and causal), the iterative refinement process, and instructions for reproducing our experimental results. Our paper does not introduce new datasets but rather evaluates our method on existing benchmarks including LONGVIDEOBENCH, VIDEO-MME, and HAYSTACK-LVBENCH, which are properly cited throughout the paper." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 141, + 642, + 189, + 652 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 642, + 189, + 652 + ], + "spans": [ + { + "bbox": [ + 141, + 642, + 189, + 652 + ], + "type": "text", + "content": "Guidelines:" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 141, + 654, + 506, + 721 + ], + "type": "list", + "angle": 0, + "index": 26, + "blocks": [ + { + "bbox": [ + 141, + 654, + 413, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 654, + 413, + 665 + ], + "spans": [ + { + "bbox": [ + 141, + 654, + 413, + 665 + ], + "type": "text", + "content": "- The answer NA means that the paper does not release new assets." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 141, + 666, + 506, + 698 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 666, + 506, + 698 + ], + "spans": [ + { + "bbox": [ + 141, + 666, + 506, + 698 + ], + "type": "text", + "content": "- Researchers should communicate the details of the dataset/code/model as part of their submissions via structured templates. This includes details about training, license, limitations, etc." 
+ } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 141, + 700, + 504, + 721 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 700, + 504, + 721 + ], + "spans": [ + { + "bbox": [ + 141, + 700, + 504, + 721 + ], + "type": "text", + "content": "- The paper should discuss whether and how consent was obtained from people whose asset is used." + } + ] + } + ], + "index": 25 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "30" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 29 + }, + { + "para_blocks": [ + { + "bbox": [ + 141, + 72, + 504, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 72, + 504, + 95 + ], + "spans": [ + { + "bbox": [ + 141, + 72, + 504, + 95 + ], + "type": "text", + "content": "- At submission time, remember to anonymize your assets (if applicable). You can either create an anonymized URL or include an anonymized zip file." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 124, + 99, + 358, + 111 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 124, + 99, + 358, + 111 + ], + "spans": [ + { + "bbox": [ + 124, + 99, + 358, + 111 + ], + "type": "text", + "content": "14. Crowdsourcing and research with human subjects" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 140, + 114, + 504, + 148 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 114, + 504, + 148 + ], + "spans": [ + { + "bbox": [ + 140, + 114, + 504, + 148 + ], + "type": "text", + "content": "Question: For crowdsourcing experiments and research with human subjects, does the paper include the full text of instructions given to participants and screenshots, if applicable, as well as details about compensation (if any)?" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 141, + 152, + 201, + 163 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 152, + 201, + 163 + ], + "spans": [ + { + "bbox": [ + 141, + 152, + 201, + 163 + ], + "type": "text", + "content": "Answer: [NA]" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 140, + 167, + 506, + 233 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 167, + 506, + 233 + ], + "spans": [ + { + "bbox": [ + 140, + 167, + 506, + 233 + ], + "type": "text", + "content": "Justification: Our research does not involve crowdsourcing or human subject experiments. We evaluate our method using existing benchmarks (LONGVIDEOBENCH,VIDEO-MME, LONGVIDEOBENCH) that contain human-annotated ground truth data, but we did not collect new human annotations or conduct human evaluations as part of our work. Our methodology is purely algorithmic, focusing on the semantic-logical frameworks for keyframe selection and evaluation through computational metrics." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 141, + 237, + 189, + 247 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 237, + 189, + 247 + ], + "spans": [ + { + "bbox": [ + 141, + 237, + 189, + 247 + ], + "type": "text", + "content": "Guidelines:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 141, + 249, + 504, + 328 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 141, + 249, + 504, + 270 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 249, + 504, + 270 + ], + "spans": [ + { + "bbox": [ + 141, + 249, + 504, + 270 + ], + "type": "text", + "content": "- The answer NA means that the paper does not involve crowdsourcing nor research with human subjects." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 141, + 272, + 504, + 305 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 272, + 504, + 305 + ], + "spans": [ + { + "bbox": [ + 141, + 272, + 504, + 305 + ], + "type": "text", + "content": "- Including this information in the supplemental material is fine, but if the main contribution of the paper involves human subjects, then as much detail as possible should be included in the main paper." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 141, + 306, + 504, + 328 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 306, + 504, + 328 + ], + "spans": [ + { + "bbox": [ + 141, + 306, + 504, + 328 + ], + "type": "text", + "content": "- According to the NeurIPS Code of Ethics, workers involved in data collection, curation, or other labor should be paid at least the minimum wage in the country of the data collector." + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 124, + 331, + 504, + 354 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 124, + 331, + 504, + 354 + ], + "spans": [ + { + "bbox": [ + 124, + 331, + 504, + 354 + ], + "type": "text", + "content": "15. Institutional review board (IRB) approvals or equivalent for research with human subjects" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 140, + 358, + 505, + 402 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 358, + 505, + 402 + ], + "spans": [ + { + "bbox": [ + 140, + 358, + 505, + 402 + ], + "type": "text", + "content": "Question: Does the paper describe potential risks incurred by study participants, whether such risks were disclosed to the subjects, and whether Institutional Review Board (IRB) approvals (or an equivalent approval/review based on the requirements of your country or institution) were obtained?" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 141, + 407, + 201, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 407, + 201, + 418 + ], + "spans": [ + { + "bbox": [ + 141, + 407, + 201, + 418 + ], + "type": "text", + "content": "Answer: [NA]" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 140, + 422, + 506, + 477 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 422, + 506, + 477 + ], + "spans": [ + { + "bbox": [ + 140, + 422, + 506, + 477 + ], + "type": "text", + "content": "Justification: Our research does not involve human subjects. We utilize existing benchmark datasets (LONGVIDEOBENCH, VIDEO-MME, HAYSTACK-LVBENCH) without collecting new data from human participants. 
Our work focuses on developing and evaluating algorithmic approaches for keyframe selection based on semantic-logical relationships, which do not require IRB approval or equivalent ethical review processes." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 141, + 481, + 189, + 491 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 481, + 189, + 491 + ], + "spans": [ + { + "bbox": [ + 141, + 481, + 189, + 491 + ], + "type": "text", + "content": "Guidelines:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 141, + 493, + 504, + 605 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 141, + 493, + 504, + 515 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 493, + 504, + 515 + ], + "spans": [ + { + "bbox": [ + 141, + 493, + 504, + 515 + ], + "type": "text", + "content": "- The answer NA means that the paper does not involve crowdsourcing nor research with human subjects." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 141, + 516, + 504, + 549 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 516, + 504, + 549 + ], + "spans": [ + { + "bbox": [ + 141, + 516, + 504, + 549 + ], + "type": "text", + "content": "- Depending on the country in which research is conducted, IRB approval (or equivalent) may be required for any human subjects research. If you obtained IRB approval, you should clearly state this in the paper." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 141, + 550, + 504, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 550, + 504, + 582 + ], + "spans": [ + { + "bbox": [ + 141, + 550, + 504, + 582 + ], + "type": "text", + "content": "- We recognize that the procedures for this may vary significantly between institutions and locations, and we expect authors to adhere to the NeurIPS Code of Ethics and the guidelines for their institution." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 141, + 583, + 504, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 583, + 504, + 605 + ], + "spans": [ + { + "bbox": [ + 141, + 583, + 504, + 605 + ], + "type": "text", + "content": "- For initial submissions, do not include any information that would break anonymity (if applicable), such as the institution conducting the review." + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 124, + 609, + 257, + 621 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 124, + 609, + 257, + 621 + ], + "spans": [ + { + "bbox": [ + 124, + 609, + 257, + 621 + ], + "type": "text", + "content": "16. Declaration of LLM usage" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 140, + 625, + 506, + 670 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 625, + 506, + 670 + ], + "spans": [ + { + "bbox": [ + 140, + 625, + 506, + 670 + ], + "type": "text", + "content": "Question: Does the paper describe the usage of LLMs if it is an important, original, or non-standard component of the core methods in this research? Note that if the LLM is used only for writing, editing, or formatting purposes and does not impact the core methodology, scientific rigorousness, or originality of the research, declaration is not required." 
+ } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 141, + 673, + 201, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 673, + 201, + 685 + ], + "spans": [ + { + "bbox": [ + 141, + 673, + 201, + 685 + ], + "type": "text", + "content": "Answer: [Yes]" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 140, + 689, + 506, + 722 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 689, + 506, + 722 + ], + "spans": [ + { + "bbox": [ + 140, + 689, + 506, + 722 + ], + "type": "text", + "content": "Justification: Our Visual Semantic-Logical Search framework uses LLMs (specifically mentioned in Section 3.2 and Figure 2) as part of our query decomposition process. We employ models such as LLAVA-7B and GPT-40 to extract semantic information from" + } + ] + } + ], + "index": 23 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "31" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 30 + }, + { + "para_blocks": [ + { + "bbox": [ + 143, + 71, + 507, + 125 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 71, + 507, + 125 + ], + "spans": [ + { + "bbox": [ + 143, + 71, + 507, + 125 + ], + "type": "text", + "content": "textual queries, including key objects, cue objects, and their logical relationships. This LLM-based decomposition is an integral component of our method, as it enables the identification of the four logical relation types (spatial, temporal, attribute, and causal) that guide our keyframe selection process. The prompt template for this query grounding is provided in Appendix H." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 141, + 131, + 189, + 141 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 131, + 189, + 141 + ], + "spans": [ + { + "bbox": [ + 141, + 131, + 189, + 141 + ], + "type": "text", + "content": "Guidelines:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 141, + 143, + 504, + 165 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 143, + 504, + 165 + ], + "spans": [ + { + "bbox": [ + 141, + 143, + 504, + 165 + ], + "type": "text", + "content": "- The answer NA means that the core method development in this research does not involve LLMs as any important, original, or non-standard components." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 141, + 166, + 504, + 187 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 166, + 504, + 187 + ], + "spans": [ + { + "bbox": [ + 141, + 166, + 504, + 187 + ], + "type": "text", + "content": "- Please refer to our LLM policy (https://neurips.cc/Conferences/2025/LLM) for what should or should not be described." 
+ } + ] + } + ], + "index": 3 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "32" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 31 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2503_13xxx/2503.13195/86c7a688-40c1-4477-ae2d-59118e3bfc36_content_list.json b/data/2025/2503_13xxx/2503.13195/86c7a688-40c1-4477-ae2d-59118e3bfc36_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..cdc33bd38a9d7ced6aa0d6b4a1fe64c7aa3618c7 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13195/86c7a688-40c1-4477-ae2d-59118e3bfc36_content_list.json @@ -0,0 +1,3167 @@ +[ + { + "type": "text", + "text": "Deep Learning Advancements in Anomaly Detection: A Comprehensive Survey", + "text_level": 1, + "bbox": [ + 153, + 70, + 841, + 140 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Haoqi Huang, Ping Wang $\\text{©}$ , Fellow, IEEE, Jianhua Pei $\\text{©}$ , Graduate Student Member, IEEE, Jiacheng Wang $\\text{©}$ , Shahren Alexanian, and Dusit Niyato $\\text{©}$ , Fellow, IEEE", + "bbox": [ + 91, + 147, + 903, + 181 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract—The rapid expansion of data from diverse sources has made anomaly detection (AD) increasingly essential for identifying unexpected observations that may signal system failures, security breaches, or fraud. As datasets become more complex and high-dimensional, traditional detection methods struggle to effectively capture intricate patterns. Advances in deep learning have made AD methods more powerful and adaptable, improving their ability to handle high-dimensional and unstructured data. This survey provides a comprehensive review of over 180 recent studies, focusing on deep learning-based AD techniques. We categorize and analyze these methods into reconstruction-based and prediction-based approaches, highlighting their effectiveness in modeling complex data distributions. Additionally, we explore the integration of traditional and deep learning methods, highlighting how hybrid approaches combine the interpretability of traditional techniques with the flexibility of deep learning to enhance detection accuracy and model transparency. Finally, we identify open issues and propose future research directions to advance the field of AD. This review bridges gaps in existing literature and serves as a valuable resource for researchers and practitioners seeking to enhance AD techniques using deep learning.", + "bbox": [ + 73, + 234, + 491, + 513 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Index Terms—Anomaly detection, deep learning, data reconstruction and prediction, Internet of things, comprehensive survey.", + "bbox": [ + 73, + 520, + 491, + 559 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "I. INTRODUCTION", + "text_level": 1, + "bbox": [ + 215, + 583, + 351, + 597 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "An anomaly refers to an observation that significantly deviates from the expected behavior in a system, often appearing unusual, inconsistent, or unexpected [1]. 
Despite the fact that outliers typically constitute only a small fraction of a dataset, they are often highly crucial because they carry important information and can reveal critical insights during analysis. Consequently, anomaly detection (AD) is the process of identifying such anomalous observations using various methods and algorithms, which aids decision-makers in better understanding data patterns and behaviors.", + "bbox": [ + 73, + 604, + 490, + 755 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The rapid development of the Internet of Things (IoT) has revolutionized the way data is generated, collected, and analyzed across various domains. IoT systems leverage a wide array of interconnected sensors and devices to collect massive", + "bbox": [ + 73, + 756, + 491, + 816 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "H. Huang, P. Wang and S. Alexanian are with the Lassonde School of Engineering, York University, Toronto, ON M3J 1P3, Canada (e-mail:joycehhq@yorku.ca; pingw@yorku.ca; yu263319@my.yorku.ca). J. Pei is with the State Key Laboratory of Advanced Electromagnetic Technology, School of Electrical and Electronic Engineering, Huazhong University of Science and Technology, Wuhan 430074, China (e-mail: jianhuapei@hust.edu.cn). J. Wang and D. Niyato are with the School of Computer Science and Engineering, Nanyang Technological University, Singapore (e-mail: jcwang_cq@foxmail.com; dniyato@ntu.edu.sg).", + "bbox": [ + 73, + 829, + 491, + 944 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "amounts of real-time data in diverse applications, including smart cities [2], industrial automation [3], healthcare [4], and transportation [5], etc. This proliferation of sensor data introduces unprecedented opportunities for enhancing operational efficiency and decision-making processes. However, it also presents significant challenges, as the data is often high-dimensional, noisy, and prone to anomalies caused by faulty sensors, environmental changes, or malicious attacks [6]. Detecting anomalies in data is critical for ensuring system reliability, security, and performance [7].", + "bbox": [ + 501, + 234, + 921, + 385 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "AD methodologies can be systematically classified according to various criteria. One prominent classification framework differentiates these methods into supervised, semi-supervised, and unsupervised approaches, predicated on the availability and nature of labeled data [8]. Supervised learning-based AD algorithms necessitate a fully labeled dataset, where each data point is explicitly annotated as either normal or anomalous. This labeling process facilitates the model's ability to discern and learn the underlying characteristics that differentiate anomalous instances from normal ones, thereby enhancing its detection accuracy. Semi-supervised learning-based methods, on the other hand, operate with a dataset comprising a substantial volume of unlabeled data alongside a smaller subset of labeled instances. These labels may include both normal and anomalous data, or in certain cases, solely normal instances [9]. In scenarios where only normal data is labeled, the semi-supervised approach converges towards unsupervised methodologies, as the model predominantly learns normal behavior patterns and identifies anomalies as deviations from these learned patterns. 
Unsupervised learning-based AD methods eschew the need for labeled data entirely, leveraging the intrinsic structural properties of the dataset to autonomously identify anomalies [10] [11]. In practical applications, a significant portion of contemporary AD research gravitates towards unsupervised methods [12]. This preference is largely driven by the substantial imbalance between the number of normal instances and anomalies, which complicates the acquisition of a sufficiently large labeled dataset required for effective supervised learning [13]. Moreover, anomalies are frequently correlated with critical failures or hazardous events, rendering the labeling process both costly and logistically challenging. Another key classification criterion is the nature of the dataset, particularly whether it comprises time-series data, which distinguishes AD methods into time-series [14] and non-temporal approaches. The applications of time-series and non-temporal AD will be discussed in detail in Section III.", + "bbox": [ + 501, + 386, + 921, + 928 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In addition to the temporal aspect, AD techniques can also", + "bbox": [ + 519, + 929, + 921, + 945 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. X, XXX 2025", + "bbox": [ + 76, + 29, + 426, + 40 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 911, + 30, + 919, + 40 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2503.13195v1 [cs.LG] 17 Mar 2025", + "bbox": [ + 22, + 260, + 57, + 705 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "be categorized based on their underlying paradigms: traditional methods and deep learning-based methods. Traditional techniques encompass statistical approaches [15], distance-based methods [16], and clustering algorithms [17]. These approaches generally rely on estimating the probability distribution of normal data to predict anomalies. However, since the early 20th century, the fields of data science, machine learning, deep learning, and artificial intelligence have witnessed exponential growth, with significant implications for AD [18]. Particularly in recent years, the advent of soft-computing techniques has significantly influenced the development of deep learning-based methods. These techniques are characterized by their ability to handle imprecise, uncertain, and nonlinear data, making them highly suitable for applications involving deep learning. Consequently, deep learning-based methods have been propelled to the forefront due to their superior capability to learn expressive representations of complex data, including high-dimensional, temporal, spatial, and graph-structured data [19]. By proficiently modeling intricate patterns and relationships inherent in the data, deep learning approaches have proven remarkably effective in identifying anomalies across a wide range of challenging and complex datasets. This paper concentrates specifically on AD methods based on deep learning models, with the objective of providing a comprehensive review of this rapidly evolving field.", + "bbox": [ + 73, + 69, + 491, + 448 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "A. 
Contrasting Traditional Models with Deep Learning Models", + "text_level": 1, + "bbox": [ + 73, + 458, + 488, + 487 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Traditional AD methods [20], such as statistical techniques, clustering algorithms [21], and Principal Component Analysis (PCA) [22], have long been established as reliable tools across a wide spectrum of applications due to their simplicity, interpretability, and low computational overhead. These characteristics make them particularly promising in scenarios where model transparency and efficiency are paramount. Statistical techniques, for example, provide clear, rule-based mechanisms for detecting anomalies, while clustering algorithms are effective in grouping similar data points and isolating outliers in relatively low-dimensional datasets. Similarly, PCA has been widely adopted for dimensionality reduction, enabling effective AD by isolating principal components that capture major variations in the data [17]. Despite these advantages, traditional methods often encounter significant limitations when applied to modern, complex datasets. Statistical techniques generally assume that data adheres to specific distributions. However, this assumption is rarely met in real-world scenarios, where data often exhibits non-Gaussian distributions and heavy tails. Clustering-based methods, while useful in many contexts, struggle to accurately define clusters, particularly when anomalies do not present clear separability from normal data. PCA, on the other hand, relies heavily on the assumption of linearity and extensive feature engineering, making it less effective at capturing the nuanced, non-linear patterns prevalent in high-dimensional datasets [22]. These constraints have prompted a shift towards more advanced approaches capable of handling the increasing complexity of modern data.", + "bbox": [ + 73, + 492, + 491, + 915 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In contrast, deep learning models have recently emerged as a powerful alternative, addressing many of the shortcom", + "bbox": [ + 73, + 914, + 491, + 945 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/01a5c0345962b2f97cd0ef64d890f1c98d4af1b99d38c5a98823b19ce41cfe61.jpg", + "image_caption": [ + "Fig. 1. The anatomy of this survey." + ], + "image_footnote": [], + "bbox": [ + 511, + 66, + 910, + 690 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "ings inherent in traditional approaches. Deep neural networks (DNNs) possess the capacity to autonomously learn complex patterns and hierarchical representations from raw data, thereby obviating the need for labor-intensive feature engineering [23]. This characteristic is particularly advantageous in the detection of subtle and multifaceted anomalies that might elude traditional methods [24]. By leveraging their multilayered architectures, deep learning models excel in processing high-dimensional and unstructured data, such as images, videos, and text, which are often challenging for conventional methods to handle effectively [25]. These models are adept at capturing non-linear relationships and interactions within the data, offering a more flexible and robust framework for AD", + "bbox": [ + 501, + 747, + 921, + 945 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO.
X, XXX 2025", + "bbox": [ + 76, + 29, + 426, + 41 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 911, + 30, + 919, + 40 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "[26]. Consequently, there has been a significant shift away from purely traditional AD techniques towards the adoption of deep learning methodologies.", + "bbox": [ + 73, + 69, + 491, + 113 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Nonetheless, it is crucial to acknowledge that traditional AD models retain certain advantages, notably in their simplicity, interpretability, and lower computational overheads [27]. These characteristics make them particularly appealing in scenarios where model transparency and computational efficiency are crucial. In recognition of these strengths, Section V of this paper will introduce and discuss various existing approaches that integrate traditional methods with deep learning techniques. These hybrid methods aim to leverage the strengths of both paradigms, resulting in more robust and efficient AD systems.", + "bbox": [ + 73, + 114, + 491, + 280 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "B. Comparison With Existing Surveys", + "text_level": 1, + "bbox": [ + 73, + 297, + 334, + 313 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In recent years, the field of AD has seen a surge in research, particularly with the advent of deep learning methods. Numerous surveys have been published, each attempting to provide a comprehensive overview of the field. However, many of these surveys focus on broader historical developments or cover deep learning techniques only up to a certain point in time. For example, surveys such as [19], [28], [29], and [23] primarily cover techniques developed up to 2020. While these surveys are valuable, they do not reflect the most recent advancements in the field. Furthermore, specific models such as Generative Adversarial Network (GAN)-based AD have been explored in-depth by studies [30], [31], [32], [33], and [34]. However, these studies primarily address foundational approaches and lack coverage of advanced techniques like conditional GANs, cycle-consistent GANs, and GANs integrated with self-supervised learning. Emerging hybrid models, combining GANs with Variational Autoencoders (VAEs) or autoencoders for improved robustness, are also underrepresented. In contrast, our survey covers the literature from 2019 to 2024, providing a timely and comprehensive overview of the latest advancements. By focusing on recent trends and evolving techniques, including enhanced architectures and hybrid frameworks, our work offers a more current perspective, bridging existing gaps and guiding future research directions in AD.", + "bbox": [ + 73, + 316, + 493, + 694 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "C. Contributions and Structure", + "text_level": 1, + "bbox": [ + 75, + 712, + 290, + 726 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "This survey systematically reviews over 160 recent research papers on AD, including publications from leading journals (IEEE, ACM, Springer, Elsevier) and top-tier conferences (AAAI, CCS, ICCV) spanning from 2019 to 2024. By focusing on cutting-edge advancements in deep learning-based methods, this survey ensures a comprehensive and up-to-date overview of the field. 
The contributions of this survey are summarized as follows:", + "bbox": [ + 73, + 731, + 490, + 851 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "- This survey addresses gaps in prior surveys by highlighting advanced techniques that were previously underexplored, including conditional GANs, cycle-consistent GANs, and hybrid frameworks combining GANs with VAEs. These models are introduced and analyzed to demonstrate their strengths and weaknesses.", + "bbox": [ + 91, + 854, + 491, + 946 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "- This survey provides a detailed comparison of reconstruction-based and prediction-based methods. To enhance clarity and usability, we summarize key strengths, weaknesses, and applications in structured tables, offering readers insights into the trade-offs of different models.", + "bbox": [ + 522, + 69, + 921, + 157 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "- Recognizing the strengths of traditional methods, this survey explores their integration with deep learning models. Hybrid approaches, such as clustering, normalizing flows, and support vector data descriptions combined with deep learning, are analyzed to address complex challenges in AD.", + "bbox": [ + 522, + 159, + 921, + 247 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The organization of this survey is shown in Fig.1. Section II provides an overview of data characteristics and anomaly types, followed by a discussion of common data processing challenges and mitigation strategies critical to effective AD. Section III explores the related applications of AD. Section IV categorizes and analyzes deep learning methods for AD, highlighting their effectiveness and limitations. Section V discusses the integration of traditional methods with deep learning, including clustering methods, normalizing flows, and support vector data descriptions. Section VI highlights open issues and future directions, such as challenges in data collection, computational complexity, explainability, and handling diverse anomaly types. Finally, Section VII concludes the survey with a summary and potential directions for future research.", + "bbox": [ + 503, + 253, + 924, + 467 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "II. DATA CHARACTERISTICS AND CHALLENGES", + "text_level": 1, + "bbox": [ + 542, + 489, + 880, + 503 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "A. Overview of Input Data and Anomaly Types", + "text_level": 1, + "bbox": [ + 503, + 515, + 828, + 531 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In AD, input data presents unique challenges due to its structure, dimensionality, and temporal nature. Different types of data require specialized techniques to effectively identify anomalies, and the nature of anomalies themselves can vary greatly depending on the domain and data format [28]. For instance, visual data such as images and videos may exhibit anomalies associated with spatial or temporal inconsistencies, while time series data often involves anomalies related to trends or sudden changes in values over time. To better understand these variations, we first categorize data into textual, audio, image, and video formats, highlighting their respective characteristics and the challenges they pose for AD. 
Beyond this classification, data can also be viewed through the lens of temporal dependencies, distinguishing between time-series data, which captures sequential patterns over time, and nontemporal data, where observations are independent of temporal order. This dual perspective provides a comprehensive framework for analyzing how different types of anomalies manifest across various data formats. Furthermore, the nature of anomalies themselves can vary depending on the data format. Point anomalies, sequence anomalies, and outliers may all manifest differently across different data types and structures. Understanding these distinctions is essential for selecting the appropriate AD techniques [29], as a deep understanding of data characteristics and anomaly types ensures that detection methods are effectively tailored to capture the specific behaviors and patterns indicative of anomalies.", + "bbox": [ + 501, + 537, + 924, + 946 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. X, XXX 2025", + "bbox": [ + 76, + 29, + 426, + 41 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 911, + 30, + 919, + 40 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "1) Categorization by Data Type:", + "text_level": 1, + "bbox": [ + 91, + 69, + 316, + 84 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "- Textual Data: Textual data consists of sequences of discrete symbols, such as characters, words, or phrases, structured in a linear format. Unlike other data types, textual data conveys information through syntactic and semantic relationships. It can be found in various forms, including documents, chat messages, emails, and system logs. Anomalies in textual data may appear as irregular word sequences, syntactic inconsistencies, missing or misplaced words, or semantically incoherent phrases.", + "bbox": [ + 91, + 89, + 491, + 224 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "- Audio Data: Audio data captures variations in amplitude and frequency over time, representing spoken language, environmental sounds, or machine signals. It can be stored as waveforms or transformed into frequency-domain representations like spectrograms. Unlike textual data, audio data is continuous and often requires spectral analysis to extract meaningful patterns. Anomalies in audio data manifest as unexpected distortions, unusual frequency shifts, missing segments, or abnormal sound patterns caused by malfunctioning equipment, altered speech, or environmental noise.", + "bbox": [ + 91, + 224, + 491, + 391 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "- Image Data: Image data consists of two-dimensional pixel grids, where each pixel represents intensity or color information. Unlike sequential data, image data encodes spatial relationships, capturing textures, shapes, and patterns. Image anomalies often appear as distortions, irregular textures, missing components, or unexpected objects that deviate from normal patterns. For instance, these can result from manufacturing defects, medical imaging errors, or environmental changes in satellite imagery.", + "bbox": [ + 91, + 391, + 491, + 542 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "- Video Data: Video data extends image data by incorporating a temporal dimension, forming sequences of frames over time. Each frame within a video is an image, and the relationships between frames capture motion and dynamic interactions [35]. 
Unlike static images, video data requires modeling temporal dependencies, making AD more complex. Anomalies in video data include irregular movements, unexpected scene transitions, or unusual object behaviors, which are commonly observed in surveillance footage, traffic monitoring, and activity recognition.", + "bbox": [ + 91, + 542, + 491, + 708 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "- Tabular Data: Tabular data consists of structured records organized in rows and columns, where each row represents an entity or event, and each column corresponds to an attribute. This type of data is widely used in databases, spreadsheets, financial records, and sensor logs. Unlike the other data types, tabular data can contain numerical, categorical, or mixed-format information. Anomalies in tabular data include missing values, unexpected categorical labels, numerical outliers, or inconsistent relationships between attributes.", + "bbox": [ + 91, + 708, + 491, + 859 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2) Categorization by Temporal Characteristics:", + "text_level": 1, + "bbox": [ + 91, + 864, + 415, + 878 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "- Time-based data: Time-based data can be represented as a sequence of observations recorded over time, and it may consist of either a single variable (univariate) or multiple variables (multivariate). We can generalize the", + "bbox": [ + 91, + 883, + 491, + 946 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "representation of both univariate and multivariate time series using the following formula: $X = \\{x_{t,j}\\}_{t\\in T,j\\in J}$ , where $t\\in T$ denotes the time index, with $t$ representing a specific time step and $T$ being the set of all time steps in the dataset. Similarly, $j\\in J$ represents the dimension or variable index, where $j$ refers to a particular variable and $J$ is the set of all variables or dimensions in the data. When $|J| = 1$ , the series is univariate, meaning there is only one variable observed over time. In contrast, when $|J| > 1$ , the series is multivariate, indicating that multiple variables are recorded simultaneously at each time step. Each observation $x_{t,j}$ corresponds to the value of the $j$ -th variable at time $t$ . Among the five previously introduced data types, audio, video, and certain types of textual and tabular data are inherently time-based. Audio data is naturally sequential, with sound signals evolving over time, making anomalies such as distortions or frequency shifts dependent on temporal patterns. Video data extends image sequences over time, requiring the detection of abnormal object movements, scene transitions, or motion inconsistencies. Textual data, such as streaming logs, system event records, or chat conversations, also exhibits temporal dependencies, where anomalies may appear as unexpected event sequences or irregular timing between log entries. Similarly, tabular data in the form of financial transactions, sensor readings, or stock prices follows a time-series format, where anomalies may indicate fraud, equipment failure, or unusual market behaviors.", + "bbox": [ + 535, + 69, + 921, + 492 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "- Non-temporal data: Non-temporal data refers to observations that lack a temporal sequence, where the relationships between data points are independent of time. Such data is prevalent across industries that rely on static datasets or event-based observations. 
AD in non-temporal data focuses on identifying irregularities by analyzing data characteristics, patterns, or statistical properties rather than temporal dependencies. This process is crucial for uncovering hidden risks, fraudulent activities, or system malfunctions in contexts where time is not a defining factor. Among the five data types, image and certain types of tabular data are the most common forms of non-temporal data. Image data, such as medical scans, industrial defect detection images, or satellite photos, captures spatial relationships but does not depend on a temporal sequence. Anomalies in such data typically appear as unusual textures, distortions, or unexpected objects. Tabular data, when not used for time-series analysis, is also non-temporal, such as customer records, product attributes, or static financial datasets. In these cases, AD focuses on identifying outliers, inconsistencies, or unusual relationships between different features rather than changes over time.", + "bbox": [ + 522, + 492, + 921, + 839 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3) Types of Anomalies:", + "text_level": 1, + "bbox": [ + 521, + 845, + 684, + 861 + ], + "page_idx": 3 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Point Anomalies: A single data point deviates significantly from the expected behavior in the dataset. These are common across both time-based and non-time-based data, representing sudden outliers or unusual values.", + "- Contextual Anomalies: A data point is considered" + ], + "bbox": [ + 521, + 869, + 921, + 944 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. X, XXX 2025", + "bbox": [ + 76, + 29, + 426, + 41 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 911, + 31, + 919, + 40 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "anomalous only when it is analyzed within a specific context or surrounding data. In time-based data, this could involve seasonal trends or time-of-day variations, whereas in non-time-based data, it could depend on relationships between variables.", + "bbox": [ + 106, + 69, + 491, + 143 + ], + "page_idx": 4 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Subsequence Anomalies: A contiguous sequence of data points behaves abnormally, typically found in time series data. These anomalies are significant when the temporal order of data points plays a key role in detecting deviations from expected patterns.", + "- Cluster-based and Correlation Anomalies: Anomalies that occur when a group of data points, or relationships between variables, deviate from expected patterns. This is more prominent in non-time-based data, where detecting irregular clusters or correlations between features is essential for AD." + ], + "bbox": [ + 93, + 145, + 491, + 310 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "B. Data Processing", + "text_level": 1, + "bbox": [ + 75, + 334, + 212, + 348 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Effective AD requires careful preparation and preprocessing of input data to ensure that detection algorithms can operate effectively. In many cases, raw data contains inherent challenges that can significantly hinder the performance of AD models. These challenges arise from the complexity of real-world data, including high dimensionality, missing or sparse values, skewed class distributions, and noise that can obscure true anomalies. 
Without addressing these issues, AD methods may struggle to accurately identify rare or subtle deviations in the data, leading to false positives, missed anomalies, or inefficient computations. Therefore, appropriate data preprocessing steps are crucial for improving detection accuracy, robustness, and overall system reliability. This subsection outlines some of the most common data processing issues and their implications for AD, along with strategies to mitigate these challenges.", + "bbox": [ + 73, + 354, + 491, + 580 + ], + "page_idx": 4 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1) Dimensionality: High-dimensional data makes AD more complex due to the \"curse of dimensionality\". As datasets expand in size and complexity—particularly with the rise of \"big data\", characterized by large-scale, high-velocity data generated from diverse sources, it becomes increasingly difficult for AD methods to maintain accuracy [36]. To address this issue, dimensionality reduction is a common approach that transforms a large set of input features into a smaller, more focused feature set [37]. While traditional methods such as PCA [38] are frequently used, they may struggle to capture nonlinear relationships in complex data. For instance, Sakurada et al. [39] compare autoencoders, which perform non-linear dimensionality reduction, with linear PCA and kernel PCA on both synthetic and real-world datasets. The study reveals that on the nonlinear and high-dimensional synthetic Lorenz dataset, AE achieved a relative AUC improvement of $26.83\\%$ compared to linear PCA. This highlights that autoencoders can even detect anomalies in data with relatively high intrinsic dimensionality, where linear PCA struggles to perform.", + "2) Sparsity: Sparse data, where many values are missing or incomplete, poses significant challenges for AD. Sparse datasets can lead to reduced detection accuracy, as missing or incomplete data points may obscure the underlying patterns necessary for detecting anomalies [36]. Cheng et al." + ], + "bbox": [ + 73, + 580, + 491, + 946 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "[40] highlight that in high-dimensional settings, the sparsity problem is further amplified as the data becomes more spread out, increasing the risk of missing critical information that signals anomalies. To address these challenges, Li et al. [41] propose an improved low-rank and sparse decomposition model (LSDM) for hyperspectral AD. Their approach models sparse components as a Gaussian Mixture (MoG), effectively capturing anomalous patterns within complex datasets by leveraging the low-rank structure. In contrast, Han et al. [42] take a different approach by introducing sparse autoencoders to learn sparse latent representations from high-dimensional input data. Through experiments on three real-world cyber-physical system datasets, the study shows that mining sparse latent patterns from high-dimensional time series can significantly improve the robustness of AD models.", + "bbox": [ + 501, + 68, + 921, + 294 + ], + "page_idx": 4 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "3) Class Imbalance: In most AD tasks, the occurrence of anomalies is significantly rarer than normal data points, resulting in a class imbalance problem. This imbalance can cause detection algorithms to be overly biased toward the majority class (normal data), leading to a higher rate of false negatives where critical anomalies are missed. 
In imbalanced datasets, it is often possible to achieve an overall high accuracy, while the recall score for the minority class (anomalies) remains very low [43]. Traditional methods to mitigate this issue involve oversampling the minority class or undersampling the majority class [44]. Recent research has increasingly focused on introducing Data Generation Models (DGM) to improve the representation of the minority class in AD. For instance, Dlamini et al. [45] use Conditional Generative Adversarial Networks (CGANs) to generate synthetic samples for the minority class and combine this with KL divergence to guide the model in accurately learning the distribution of the minority class.", "4) Noise in Data: Noise refers to random or irrelevant information present in the data, which can obscure true anomalies and lead to false positives. In addition, during the training process of AD models, the high complexity of the model and the presence of noisy data can lead to overfitting, where the model inadvertently learns to fit the reconstruction error from noisy inputs rather than focusing on genuine anomalies [46]. To reduce the impact of noisy data, Zhang et al. [47] incorporate a Maximum Mean Discrepancy (MMD) to encourage the distribution of low-dimensional representations to approximate a target distribution. The goal is to align the distribution of noisy data with that of normal training data, thereby reducing the risk of overfitting. Furthermore, Chen et al. [48] propose a novel method called Noise Modulated Adversarial Learning, where noise images from a predefined normal distribution are fed into the discriminator network as negative samples. This adversarial process modulates the training of the reconstruction network, balancing the learning between the two networks to improve robustness against noise.", "5) Privacy of Data: In many fields, such as healthcare, finance, and cybersecurity, data used for AD often contains sensitive or personal information. Ensuring the privacy and security of this data is paramount, as improper handling could lead to serious legal and ethical violations. Hassan et al. [49] conduct an in-depth investigation into the privacy of AD" ], "bbox": [503, 295, 921, 944], "page_idx": 4 }, { "type": "header", "text": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. X, XXX 2025", "bbox": [76, 29, 426, 41], "page_idx": 4 }, { "type": "page_number", "text": "5", "bbox": [911, 30, 919, 40], "page_idx": 4 }, { "type": "text", "text": "models in blockchain technology. To address these privacy concerns, Federated Learning (FL), a distributed machine learning paradigm, has emerged as a promising supplement to AD [50]. FL allows distributed clients to collaboratively train a shared model while protecting the privacy of their local data. For example, Idrissi et al. [51] propose Fed-ANIDS, which leverages FL to address the privacy issues associated with centralized Network Intrusion Detection Systems (NIDS). This model was applied to various settings and popular datasets, demonstrating its ability to achieve high performance while preserving the privacy of distributed client data. Cui et al. [52] further introduce GANs into FL and design a new algorithm that injects controllable noise into local model parameters, ensuring both AD utility and compliance with differential privacy requirements.", "bbox": [73, 69, 491, 297], "page_idx": 5 }, { "type": "text", "text": "III. 
RELATED APPLICATIONS", "text_level": 1, "bbox": [178, 313, 387, 325], "page_idx": 5 }, { "type": "text", "text": "With the rapid advancement of deep learning models, AD has become more efficient and adaptable. These sophisticated models have been widely applied across various domains, enhancing the ability to identify irregular patterns in complex and high-dimensional datasets. In the previous section, we categorized data based on temporal characteristics into time-series and non-time-series data. However, visual data presents unique challenges, detection requirements, and a wide range of applications, making it difficult to classify strictly as either time-series or non-time-series data. It can be static (e.g., images) or dynamic (e.g., videos), where images are typically considered non-time-series data, while videos fall under time-series data. Visual data is extensively used in fields such as medical imaging, autonomous systems, and surveillance, where detecting anomalies requires specialized deep learning techniques that differ from traditional numerical or categorical data analysis. To better reflect its broad applications and distinct computational needs, we discuss visual data separately. Based on this classification, we will now explore the applications of deep learning in AD from three perspectives: time-series data, non-temporal data, and visual data.", "bbox": [73, 330, 491, 650], "page_idx": 5 }, { "type": "text", "text": "A. Applications in Time Series Data", "text_level": 1, "bbox": [73, 669, 326, 683], "page_idx": 5 }, { "type": "text", "text": "Time series data, defined by its sequential nature over time, is fundamental to many systems where the temporal order of events critically influences analysis and decision-making processes. AD in time series data has become an indispensable technique across various industries, enabling the early detection of irregular patterns that may indicate underlying issues or emerging threats. The applications of time series AD are extensive, impacting critical areas such as traffic monitoring, power system management, and healthcare. In the following sections, we present how these applications leverage AD to enhance operational efficiency, ensure system reliability, and improve safety across these fields.", "bbox": [73, 686, 490, 867], "page_idx": 5 }, { "type": "text", "text": "1) Traffic Monitoring: Time series AD plays a pivotal role in modern traffic management systems. As demonstrated in [53], real-time data from loop detection sensors are integrated and analyzed to predict traffic volume and enhance system safety. The ability to detect anomalies in traffic patterns is", "bbox": [73, 869, 491, 946], "page_idx": 5 }, { "type": "text", "text": "essential for anticipating and responding to potential incidents before they escalate. For instance, Li et al. [54] present a method that identifies traffic incidents by detecting anomalies in traffic time series data, thereby helping users avoid accidents and reduce travel time. Furthermore, high-speed driving is identified as a significant contributor to traffic accidents [55]. By monitoring and analyzing sudden increases in vehicle speed, AD techniques can predict and prevent accidents more effectively, providing a critical tool for improving road safety. Zhao et al. 
[56] further validate the efficacy of unsupervised AD methods in assessing elevated road traffic accident risks, specifically by analyzing volume and speed data from traffic on Yan'an elevated road. This approach enhances the ability to detect and respond to hazardous traffic conditions in real-time, underscoring the indispensable role of AD in traffic management.", + "bbox": [ + 501, + 69, + 921, + 311 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "2) Power System: AD is a vital element in ensuring the stability, security, and reliability of electrical grids. By continuously monitoring grid data, these techniques can swiftly identify deviations from normal operational patterns, which may indicate issues such as natural faults or malicious cyberattacks. The ability to detect these anomalies in real-time is crucial for preventing potential outages and maintaining a consistent power supply. For instance, Li et al. [57] highlight that accurate and real-time AD can enhance grid stability by over $20\\%$ , providing rapid response capabilities that significantly bolster the system's defense against both natural disruptions and cyber threats. Furthermore, the introduction of a residential electrical load AD framework, as demonstrated in [58], has been shown to significantly improve both load prediction accuracy and AD, thereby optimizing demand-side management (DSM) in residential areas. In terms of cybersecurity, the MENSA Intrusion Detection System (IDS) [59] has proven to be a formidable tool in smart grid environments, effectively detecting operational anomalies and classifying a wide range of cyberattacks. This capability not only protects critical infrastructure but also underscores the indispensable role of AD in modern power system management.", + "bbox": [ + 503, + 325, + 921, + 657 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3) Healthcare: AD plays a crucial role in healthcare by enabling continuous monitoring of patient vital signs, such as heart rate and blood pressure, to swiftly identify abnormal conditions that may require urgent medical intervention. The application of AD in medical signal analysis is particularly important, as highlighted in [60], where the identification of data samples that deviate from the typical data distribution can reveal underlying issues such as noise, changes in a patient's condition, or the emergence of new and previously undetected medical conditions. This capability is essential for ensuring accurate diagnosis and timely patient care. Furthermore, Keeley et al. [61] demonstrate that AD algorithms can effectively identify irregularities in heart rate data, which not only facilitates faster emergency responses but also provides deeper insights into a patient's health status. This, in turn, enhances overall patient care while also reducing the cognitive load on healthcare professionals by automating the detection of potential issues.", + "bbox": [ + 503, + 672, + 921, + 944 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. X, XXX 2025", + "bbox": [ + 76, + 29, + 426, + 41 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 911, + 31, + 919, + 40 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "B. 
Applications in Non-temporal Data", "text_level": 1, "bbox": [75, 69, 339, 84], "page_idx": 6 }, { "type": "text", "text": "AD in non-temporal data plays a critical role in ensuring operational integrity, security, and financial stability. By focusing on identifying irregularities within independent events or static datasets, it addresses potential risks such as fraud, system failures, and malicious activities. Unlike time-series applications, non-temporal AD leverages data patterns and statistical analysis to uncover deviations that signal anomalies. In the following, we present specific applications across domains such as finance and cybersecurity, showcasing its significant impact on safeguarding critical systems and operations.", "bbox": [73, 90, 491, 242], "page_idx": 6 }, { "type": "text", "text": "1) Finance: In the financial sector, non-temporal data AD is pivotal for identifying fraudulent transactions, credit scoring anomalies, and unusual trading activities. Unlike time series data, these financial fraud detection tasks often involve independent events, such as individual transactions or credit score evaluations, which do not rely on temporal sequences. Instead, the focus is on transaction characteristics and patterns that may indicate fraudulent behavior. Various data mining techniques, including SVM, Naïve Bayes, and Random Forest, are extensively employed to detect different forms of financial fraud, such as bank fraud, insurance fraud, financial statement fraud, and cryptocurrency fraud [62]. As highlighted by [63], AD is critical in quickly identifying activities that deviate from normal patterns, thereby enabling rapid intervention to minimize financial losses.", "bbox": [73, 243, 491, 469], "page_idx": 6 }, { "type": "text", "text": "2) Cybersecurity: AD is a fundamental component of maintaining a secure and resilient cyberspace. As [64] points out, advanced security controls and resilience analysis are crucial during the early stages of system deployment to ensure long-term sustainability. AD plays a pivotal role in this process by identifying unauthorized access, malicious activities, and network intrusions that deviate from established norms. This capability is essential for safeguarding network security and preventing potential breaches. Early research in deep learning-based network intrusion detection focused on architectures such as Autoencoders (AE), Deep Belief Networks (DBN), and Recurrent Neural Networks (RNN) [24]. As deep learning technology has advanced, more sophisticated models have been developed for detecting anomalies in cybersecurity. For instance, Singh et al. [65] illustrate the benefits of AD in wide-area protection schemes (WAPS) by using a deep learning-based cyber-physical AD system (CPADS) to detect and mitigate data integrity and communication failure attacks in centralized Remedial Action Schemes (CRAS). Similarly, Nagarajan et al. [66] highlight the effectiveness of AD in enhancing the security of Cyber-Physical Systems (CPSs) by accurately identifying anomalous behaviors, thereby addressing the growing challenges posed by sophisticated cyberattacks and the increasing volume of data.", "bbox": [73, 470, 493, 834], "page_idx": 6 }, { "type": "text", "text": "C. 
Applications in Visual data", + "text_level": 1, + "bbox": [ + 75, + 862, + 287, + 878 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "AD in visual data, encompassing images and videos, plays a vital role in numerous industries where visual inspection is critical. Applications range from detecting defects in manufacturing processes to identifying medical abnormalities in", + "bbox": [ + 73, + 883, + 491, + 946 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "imaging, monitoring public safety through surveillance systems, and ensuring quality control in production lines. By leveraging advanced deep learning techniques, AD methods can automatically identify and analyze irregularities with high precision, reducing reliance on manual inspection and improving efficiency. In this section, we explore key applications of visual data-based AD, highlighting its transformative impact across various domains.", + "bbox": [ + 501, + 69, + 921, + 188 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "1) Medical Imaging: AD in medical imaging is indispensable across numerous medical specialties, playing a crucial role in the early detection and diagnosis of diseases. In radiology, it is employed to identify anomalies in X-rays [67], brain imaging [68], and CT scans [69], thereby aiding in the accurate diagnosis of various conditions. However, as [70] highlights, anomalies in medical images often closely resemble normal tissue, posing a significant challenge to detection due to their subtle differences. This similarity requires the use of sophisticated techniques to effectively distinguish between normal and anomalous data. For example, Draelos et al. [71] demonstrate the power of machine learning in radiology, significantly enhancing the classification performance for multiple abnormalities in chest CT volumes, achieving an AUROC greater than 0.90 for 18 different abnormalities. Additionally, Shvetsova et al. [72] showcase a novel method for AD in medical images, which dramatically improves the detection of subtle abnormalities in complex, high-resolution images, such as chest X-rays and pathology slides—scenarios where traditional models often fail. Furthermore, Zhao et al. [73] introduce the SALAD framework, which enhances AD in medical images by utilizing self-supervised and translation-consistent features from normal data. This approach is particularly effective in situations where labeled anomalous images are scarce, thereby improving detection accuracy in challenging medical imaging tasks.", + "bbox": [ + 501, + 189, + 921, + 582 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "2) Video Monitoring: Video AD (VAD) has become increasingly crucial with the rise of large-scale multimedia data analysis, particularly in the processing of video data [74]. VAD focuses on identifying unusual patterns or behaviors in video footage that deviate from the norm, making it a vital tool in several domains. In security and surveillance, VAD is used to monitor public spaces, buildings, and secure areas, enabling the detection of suspicious activities, unauthorized access, and unusual crowd behaviors, thereby enhancing public safety [75]. In the realm of traffic monitoring, VAD facilitates the real-time identification of accidents and irregular traffic patterns, allowing for prompt response and management [76]. 
Additionally, VAD is applied in behavioral analysis to detect abnormal behaviors in various environments, such as schools, workplaces, and public transportation systems, contributing to the maintenance of safety and order. For example, Chen et al. [77] propose a bidirectional prediction framework specifically designed for AD in surveillance videos. This innovative approach employs forward and backward prediction subnetworks to predict the same target frame, constructing a loss function based on the real target frame and its bidirectional predictions. Experimental results demonstrate that this model outperforms existing approaches on various surveillance video datasets, including those featuring pedestrians and street scenes, showcasing", "bbox": [503, 582, 921, 946], "page_idx": 6 }, { "type": "header", "text": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. X, XXX 2025", "bbox": [76, 29, 426, 41], "page_idx": 6 }, { "type": "page_number", "text": "7", "bbox": [911, 30, 919, 40], "page_idx": 6 }, { "type": "text", "text": "its superior performance in accurately detecting anomalies in real-world surveillance scenarios.", "bbox": [73, 69, 491, 98], "page_idx": 7 }, { "type": "text", "text": "IV. DEEP LEARNING METHODS FOR ANOMALY DETECTION", "text_level": 1, "bbox": [116, 116, 449, 143], "page_idx": 7 }, { "type": "text", "text": "The application of deep learning to AD has revolutionized the way we identify irregularities in both time-based and non-time-based datasets [78]. Traditional methods, such as statistical analysis and clustering, have been commonly used to detect anomalies. However, these methods often struggle with high-dimensional data, complex relationships, and capturing intricate patterns. Deep learning models, with their ability to learn hierarchical representations and detect subtle anomalies, have emerged as powerful tools to overcome these limitations.", "bbox": [73, 150, 491, 286], "page_idx": 7 }, { "type": "text", "text": "As shown in Fig.2, this section introduces three major deep learning approaches applied to AD: reconstruction-based methods, prediction-based methods, and hybrid approaches. Each approach leverages the strengths of deep learning in distinct ways to improve AD accuracy, particularly in scenarios where data patterns are complex, unstructured, or temporal.", "bbox": [73, 286, 491, 377], "page_idx": 7 }, { "type": "text", "text": "A. Deep learning methods for Anomaly Detection based on Reconstruction", "text_level": 1, "bbox": [73, 397, 491, 425], "page_idx": 7 }, { "type": "text", "text": "Reconstruction-based approaches operate by training a model to learn the underlying distribution of normal data [79]. Once trained, the model attempts to reconstruct incoming data. The reconstruction error, which is the difference between the original data and its reconstruction, is then used as an indicator of anomaly. A high reconstruction error suggests that the data is anomalous, as it deviates from the learned normal patterns. Deep learning-based reconstructive models have become prominent due to their ability to capture complex patterns in high-dimensional data. In recent years, most reconstruction-based AD models have been developed using techniques such as GAN, AE, and diffusion models. These models each have unique strengths and weaknesses, as summarized in Table I. 
This table consolidates insights from multiple studies, including [80], [81], [82], and [83], which have analyzed the advantages and limitations of GANs, VAEs, and Diffusion Models in AD. In this section, we introduce these three types of models in the context of AD and discuss their various variants.", "bbox": [73, 431, 491, 715], "page_idx": 7 }, { "type": "text", "text": "1) GAN-based Anomaly Detection: GANs are powerful tools for generating synthetic data that resembles a given training dataset [84]. As shown in the upper part of Fig.3, GANs consist of two main components: a generator and a discriminator, both of which are neural networks. Because of this structure, GAN models are highly flexible, allowing for different networks to be chosen as the generator and discriminator based on the specific task. This flexibility makes GANs a versatile framework for a wide range of applications. The generator $G$ takes a random noise vector $z$ (usually sampled from a Gaussian distribution) as input and generates synthetic data $G(z)$. The discriminator $D$ receives a data sample (either from the real dataset or from the generator) as input and outputs a probability $D(x)$, representing the likelihood that the input is real (i.e., from the actual dataset) rather than fake (i.e.,", "bbox": [73, 718, 491, 946], "page_idx": 7 }, { "type": "image", "img_path": "images/0582fc6497eafabed1bd3451b1290a6b09ece498ffa50d1156dd7f0120e507ef.jpg", "image_caption": [], "image_footnote": [], "bbox": [532, 66, 885, 130], "page_idx": 7 }, { "type": "image", "img_path": "images/880d55c7ec7dc69d5f5d9f1f2b32b3c2153e316b56c5560359f5852c0d17b74d.jpg", "image_caption": [], "image_footnote": [], "bbox": [532, 143, 885, 210], "page_idx": 7 }, { "type": "image", "img_path": "images/ab7aedd616ec4505c3fc87c94f267fe1e3ff25684d846743f04d6cdec18a9037.jpg", "image_caption": [ "Fig. 2. Three types of anomaly detection: (a) Reconstruction-based approach, (b) Prediction-based approach, (c) Hybrid method." ], "image_footnote": [], "bbox": [532, 212, 885, 318], "page_idx": 7 }, { "type": "image", "img_path": "images/7509a938836ca50d09aa01364c29c0e376376912d0ca6818c1dc0151a063308a.jpg", "image_caption": [ "Fig. 3. Structural Frameworks for GAN Anomaly Detection." ], "image_footnote": [], "bbox": [532, 375, 888, 588], "page_idx": 7 }, { "type": "text", "text": "generated by the generator). The generator and discriminator are trained simultaneously through a process where the generator tries to produce data that can fool the discriminator, and the discriminator tries to improve its ability to distinguish between real and fake data. Table II provides a comprehensive summary of recent GAN-based AD models, categorizing them based on their techniques, approaches, strengths, and weaknesses. 
This table highlights how different GAN variants are tailored for specific AD tasks, along with the types of data they are applied to and their publication years.", "bbox": [501, 642, 921, 794], "page_idx": 7 }, { "type": "text", "text": "The training process of GANs can be described as a minimax game with the following objective function:", "bbox": [503, 795, 921, 825], "page_idx": 7 }, { "type": "equation", "text": "\n$$\n\\begin{aligned} \\min_{G} \\max_{D} V(D, G) &= \\mathbb{E}_{x \\sim p_{data}(x)}[\\log D(x)] \\\\ &+ \\mathbb{E}_{z \\sim p_{z}(z)}[\\log(1 - D(G(z)))]. \\end{aligned} \\tag{1}\n$$\n", "text_format": "latex", "bbox": [522, 849, 919, 892], "page_idx": 7 }, { "type": "text", "text": "In this function, $p_{data}(x)$ represents the distribution of the real data, $p_z(z)$ represents the distribution of the noise vector $z$, $G(z)$ is the data generated by the generator, and $D(x)$ is the", "bbox": [503, 898, 921, 946], "page_idx": 7 }, { "type": "header", "text": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. X, XXX 2025", "bbox": [76, 29, 426, 41], "page_idx": 7 }, { "type": "page_number", "text": "8", "bbox": [911, 30, 919, 40], "page_idx": 7 }, { "type": "table", "img_path": "images/09194629b0767868e7df5246fea6e4a2179c838fa9818a8cc9e274fc26ee5ae9.jpg", "table_caption": [ "TABLE I COMPARISON OF GANS, VAES, AND DIFFUSION MODELS IN ANOMALY DETECTION" ], "table_footnote": [], "table_body": "
ModelStrengthsWeaknesses
GANs• Capable of generating high-fidelity, realistic samples.\n• Learns complex data distributions using adversarial loss.\n• Useful in AD by distinguishing real vs. generated data.• Prone to mode collapse, leading to low sample diversity.\n• Hard to train with difficult-to-interpret losses.\n• Training is unstable and hard to converge.
VAEs• Easy to train with one tractable likelihood loss.\n• Provides high sample diversity by covering all data modes.\n• Latent space representation is useful for AD tasks.• Produces low-fidelity, often blurry samples.\n• Pixel-based loss leads to sample ambiguity and blurriness.
Diffusion Models• Generates high-fidelity samples with gradual refinement.\n• High sample diversity due to likelihood maximization.\n• Intermediate noisy images serve as useful latent codes for AD.• Slow sample generation due to the multi-step denoising process.\n• Computationally intensive, requiring many steps for both forward and reverse diffusion.
", + "bbox": [ + 94, + 104, + 901, + 210 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "probability that $x$ is real. The generator $G$ aims to minimize this objective, while the discriminator $D$ aims to maximize it. The discriminator updates its weights to maximize the probability of correctly classifying real and generated data, while the generator updates its weights to minimize the discriminator's ability to distinguish between real and fake data.", + "bbox": [ + 73, + 236, + 491, + 325 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In the context of AD, GANs play crucial roles in both representation learning and data augmentation, each serving distinct purposes within deep Learning [85]. In representation learning, the primary objective of GANs is to learn and model the underlying distribution of the data, enabling the generation of synthetic data that closely resembles real data. This process involves a generator that creates fake data from random noise and a discriminator that distinguishes between real and fake data. Through iterative training, the generator improves its ability to produce realistic data, which is particularly useful in tasks like AD. For example, in [86], GANs are used for representation learning by generating fake data that matches the distribution of normal data. This generated data is then used to train a VAE to detect anomalies through reconstruction errors. Similarly, in [87], a fault-attention generative probabilistic adversarial autoencoder (FGPAA) is proposed, combining GANs and autoencoders for AD by learning the low-dimensional manifold of healthy state data. The GAN component aids in feature representation learning, reducing signal information loss and enhancing the model's ability to detect anomalies through distribution probability and reconstruction error.", + "bbox": [ + 73, + 325, + 491, + 643 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "There are two main structures to using GANs for AD, as shown in Fig.3. The first approach is based on the generator, as depicted in the lower part of Fig.3, highlighted by the yellow box. The basic idea is to train the GAN on normal data and then use the reconstruction error to identify anomalies. During the training phase, the GAN is trained exclusively on normal data, allowing the generator to learn to produce data that closely mimics the normal data distribution. During the detection phase, a test data point $x$ is fed into the generator to obtain the reconstructed data $G(x)$ . The reconstruction error, typically measured as the difference between the original data point $x$ and the reconstructed data $G(x)$ , is then used to detect anomalies. This can be quantified using metrics such as mean squared error (MSE). If the reconstruction error exceeds a predefined threshold, the data point is classified as an anomaly. 
The intuition behind this approach is that the generator, trained solely on normal data, will struggle to accurately reconstruct anomalous data, resulting in a high reconstruction error.", + "bbox": [ + 73, + 643, + 491, + 912 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "The mathematical representation for AD using GANs involves computing the reconstruction error $E(x)$ as follows:", + "bbox": [ + 73, + 914, + 491, + 945 + ], + "page_idx": 8 + }, + { + "type": "equation", + "text": "\n$$\nE (x) = \\| x - G (x) \\| ^ {2}, \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 635, + 250, + 919, + 267 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "where $\\| \\cdot \\| ^2$ denotes the squared Euclidean distance. A threshold $\\tau$ is set, and if $E(x) > \\tau$ , the data point $x$ is considered an anomaly. For example, Dong et al. [88] propose a semi-supervised approach for video AD using a dual discriminator-based GAN structure, focusing on representation learning. In this approach, the generator predicts future frames for normal events, and anomalies are detected by evaluating the quality of these predictions. Similarly, Guo et al. [89] introduce RegraphGAN, a graph generative adversarial network specifically designed for dynamic graph AD. RegraphGAN utilizes GAN-based representation learning to encode complex spatiotemporal relationships in graph data, allowing it to better capture anomalies. By leveraging encoders to project input samples into a latent space and integrating GANs to enhance both training stability and efficiency, RegraphGAN significantly improves AD performance over existing methods.", + "bbox": [ + 501, + 273, + 921, + 517 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "The second approach leverages the discriminator highlighted by the green box in Fig.3. A well-trained discriminator has the ability to differentiate between real (normal) and fake (anomalous) samples. During the detection phase, test samples are directly input to the discriminator, which evaluates the likelihood that a given sample is real. If the discriminator assigns a low probability to a sample, suggesting that it is likely fake or anomalous, the sample is flagged as an anomaly. This method relies on the discriminator's capacity to recognize deviations from the normal data distribution it learned during training. For instance, Liu et al. [90] propose a GAN framework that uses multiple generators to produce potential outliers, which are then distinguished from normal data by a discriminator to detect anomalies. The discriminator's output score is used to evaluate the anomaly degree of input data, providing a comprehensive reference distribution and preventing mode collapse.", + "bbox": [ + 501, + 518, + 921, + 775 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Additionally, GANs are highly effective in data augmentation, helping to mitigate the scarcity of anomaly samples, which often results in data imbalance and poor generalization [91]. When anomaly samples are unevenly distributed or lacking in diversity, models struggle to learn rare anomalies and can overfit to the training set, reducing their accuracy on unseen data. Traditional data augmentation techniques—such as scaling, rotation, random cropping, translation, flipping, and copy-paste—attempt to mitigate these issues. 
However, simple linear transformations fail to capture new distributions and features of unknown anomalies, such as random changes in", "bbox": [501, 777, 921, 945], "page_idx": 8 }, { "type": "header", "text": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. X, XXX 2025", "bbox": [76, 29, 426, 41], "page_idx": 8 }, { "type": "page_number", "text": "9", "bbox": [911, 30, 919, 40], "page_idx": 8 }, { "type": "text", "text": "shape or texture. This is where GANs provide a significant advantage. By generating synthetic anomaly data that mimics the distribution of real-world anomalies, GANs enable models to learn a more diverse set of anomaly features. This not only addresses the imbalance problem but also improves the model's generalization capabilities, as it learns to detect anomalies based on a broader range of characteristics beyond those present in the original training dataset. Miao et al. [92] introduce an unsupervised AD framework that uses data augmentation through contrastive learning and GANs to mitigate overfitting. By employing a geometric distribution mask, it enhances data diversity and generates synthetic anomaly samples, addressing the scarcity of anomaly data. In [93], Anomaly-GAN addresses data augmentation by using a mask pool, anomaly-aware loss, and local-global discriminators to generate high-quality, realistic synthetic anomalies with diverse shapes, angles, spatial locations, and quantities in a controllable manner. Li et al. [94] propose an augmented time regularized generative adversarial network that combines an augmented filter layer and a novel temporal distance metric to generate high-quality and diverse artificial data, addressing the limitations of existing GAN approaches in handling limited training data and temporal order.", "bbox": [73, 69, 491, 416], "page_idx": 9 }, { "type": "text", "text": "2) AE-based Anomaly Detection: In recent years, the limitations of traditional AE models in handling complex and noisy data have become more apparent, leading to the development of enhanced methods to improve their performance in AD tasks. For example, Fan et al. [97] introduce a new framework by incorporating the $\ell_{2,1}$-norm into the AE, and experiments have demonstrated that this framework can significantly improve AD accuracy by increasing the model's robustness to noise and outliers during training. Wang et al. [98] demonstrate that introducing an adaptive-weighted loss function can effectively suppress anomaly reconstruction, thereby improving the accuracy of AD. Liu et al. [99] introduce a multi-scale convolutional AE architecture, where multiple stacked convolutional encoder-decoder layers act as background learners to robustly eliminate anomalies of varying sizes during background reconstruction. Additionally, Lin et al. [100] introduce a soft calibration strategy combined with AE to address the issue of data contamination in AD.", "bbox": [73, 416, 491, 686], "page_idx": 9 }, { "type": "text", "text": "VAEs are another generative model widely used in AD tasks. Like GANs, VAEs aim to learn the distribution of normal data to identify anomalies. However, unlike GANs, which rely on adversarial training between a generator and a discriminator, VAEs use an encoder-decoder architecture. Fig.4 illustrates the structure of AD based on VAE. 
The goal of a VAE is to map the input data into a latent space through the encoder and model the data distribution probabilistically within this space. This approach allows the VAE to generate new data that closely resembles the true data distribution, and anomalies can be detected by evaluating the reconstruction error.", "bbox": [73, 686, 491, 867], "page_idx": 9 }, { "type": "text", "text": "The internal structure of a VAE is similar to that of a traditional AE but with some key differences. First, the encoder in a VAE not only compresses the input data into a lower-dimensional latent space but also learns a probabilistic distribution, typically parameterized by a mean $\mu$ and a variance", "bbox": [73, 869, 491, 946], "page_idx": 9 }, { "type": "image", "img_path": "images/fd57f4067bb6f3c6e9b25b5ccfc25170dec2d8afbbeb522aa5c35f6a09e5a7e3.jpg", "image_caption": [ "Fig. 4. Structural Frameworks for VAE Anomaly Detection." ], "image_footnote": [], "bbox": [532, 66, 888, 200], "page_idx": 9 }, { "type": "text", "text": "$\sigma^2$, as shown in Fig.4. This enables the VAE to generate more meaningful latent variables $z$, enhancing the diversity and robustness of the generated data. A critical component introduced in VAEs is the Kullback-Leibler (KL) divergence, which measures the difference between the latent distribution generated by the encoder and a predefined prior distribution (usually a standard normal distribution). Unlike traditional AEs, which focus solely on minimizing the reconstruction error, VAEs are trained by minimizing a combination of the reconstruction error and the KL divergence:", "bbox": [501, 247, 924, 400], "page_idx": 9 }, { "type": "equation", "text": "\n$$\n\\mathcal{L}_{\\mathrm{VAE}} = -\\mathbb{E}_{q(z|x)}[\\log p(x|z)] + D_{\\mathrm{KL}}(q(z|x) \\| p(z)). \\tag{3}\n$$\n", "text_format": "latex", "bbox": [547, 407, 919, 425], "page_idx": 9 }, { "type": "text", "text": "This difference makes VAEs more powerful in AD because they not only consider the quality of the data reconstruction but also enforce a structured latent space through the KL divergence. By doing so, KL divergence helps to regularize the latent space, ensuring that the encoded representations are smoothly distributed and centered around the prior distribution. This regularization reduces overfitting, promotes better generalization, and makes it easier to distinguish between normal and anomalous data, especially in complex and high-dimensional datasets. Table III provides a comprehensive summary of the latest advancements in VAE-based AD models, showcasing innovative enhancements that address various challenges such as noise robustness, semantic feature learning, and anomaly reconstruction. Huang et al. [101] enhance VAE-based AD by incorporating an Autoencoding Transformation into the model, which ensures that the training phase effectively captures high-level visual semantic features of normal images, thereby increasing the anomaly score gap between normal and anomalous samples. Similarly, Yin et al. [102] utilize Convolutional Neural Network (CNN) and VAE with a two-stage sliding window approach in data preprocessing to learn better representations for AD tasks. Zhang Yin et al. 
[103] propose the Graph Relational Learning Network (GReLeN), which integrates a VAE structure with graph dependency learning for AD in multivariate time series through reconstruction. Zhou et al. [104] propose a variational long short-term memory (VLSTM) model for high-dimensional AD in imbalanced datasets, combining a compression network for efficient data representation with an estimation network for accurate classification of network traffic data. The VLSTM model balances data compression and feature retention using core LSTM and variational modules.", + "bbox": [ + 501, + 431, + 924, + 912 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "In recent years, many advancements in AD models inspired by VAEs have focused on Adversarial Autoencoders (AAEs)", + "bbox": [ + 503, + 914, + 921, + 945 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. X, XXX 2025", + "bbox": [ + 76, + 29, + 426, + 41 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 906, + 30, + 919, + 40 + ], + "page_idx": 9 + }, + { + "type": "table", + "img_path": "images/0a1dd839ad5ce27d8ca8ac3dde9e2c28a00c69f076a04ee08e2228fd8db6d21b.jpg", + "table_caption": [ + "TABLE II GAN-BASED MODELS IN ANOMALY DETECTION" + ], + "table_footnote": [], + "table_body": "
PaperTechniqueApproach TypeStrengthWeaknessData TypeYear
[90]GANReconstructionDoes not depend on assumptions about the normal data and requires less computing resources.The method involves the selection of multiple hyperparameters, making the tuning process challenging and potentially time-consuming.Structured data2020
[48]GAN+CNNPredictionThe NM-GAN model enhances both the generalization and discrimination abilities through noise-modulated adversarial learning, resulting in improved accuracy and stability for video AD.The model struggles to fully capture complex temporal patterns like staying, wandering, and running, and lacks adaptive modulation of generalization and discrimination abilities, leaving room for improvement in spatiotemporal feature learning.Video data2021
[94]GANReconstructionIs capable of generating more effective artificial samples for training supervised learning models, thereby addressing the issue of data imbalance.Its performance is inferior to the baseline algorithms when the balanced ratio is 0.125.Image data2021
[95]GAN+LSTMPredictionThe TMANomaly framework excels in capturing complex multivariate correlations in industrial time series data, enhancing AD accuracy through mutual adversarial training.The paper lacks discussion on TMANomaly's generalization to other datasets, the potential limitations of using GRA for feature selection, and the computational efficiency or scalability, which are critical for real-time industrial systems.Multivariate time series data2022
[96]GAN+LSTMPredictionFGANomaly method effectively filters anomalous samples before training, improving AD accuracy and robustness by precisely capturing normal data distribution and dynamically adjusting generator focus.The method lacks effective fusion of information across different dimensions in multivariate time series, which limits its ability to fully capture complex correlations.Multivariate time series data2022
[93]GANReconstructionImproves the quality of the generated anomaly images and generates anomalies with different shapes, rotation angles, spatial locations, and numbers in a controllable manner.The images generated are not very sensitive to the change of light.Image data2023
[89]GANReconstructionImproves training efficiency and stability in dynamic graph AD while avoiding the expensive optimization process typical of traditional graph generative adversarial networks.The detection accuracy on the UCI Message dataset is lower than that of TADDY.Dynamic graph data2023
[92]GAN+TransformerReconstructionIt can effectively detect anomalies in long sequences, mitigates overfitting, and incorporates contrastive loss into the discriminator to fine-tune the GAN, ensuring strong generalization ability.It may struggle with irregularly sampled data or datasets with many missing values, requires careful tuning of several hyperparameters, and demands significant computational resources, posing challenges for real-time processing on limited-capacity devices.Multivariate time series data2024
", + "bbox": [ + 76, + 102, + 931, + 584 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "[105]. Unlike traditional VAEs, which use KL divergence to match the latent space distribution to a prior, AAEs achieve this through the use of GANs. Specifically, AAEs employ a GAN's discriminator to evaluate the latent variable distribution produced by the encoder and use adversarial training to align it with the desired prior distribution, providing more flexible control over the quality of the generated data. Wu et al. [87] propose the Fault-Attention Generative Probabilistic Adversarial Autoencoder (FGPAA) for machine AD, utilizing an end-to-end AAE with double discriminators to extract relevant features and ensure accurate equipment health monitoring through a fault-attention probability distribution. Idrissi et al. [51] apply AAE and FL in the field of network intrusion detection, effectively ensuring AD performance while safeguarding client privacy. Experimental results demonstrate that the proposed model outperforms AE, VAE, and AAE on various network traffic datasets, achieving high performance across different metrics. Su et al. [106] propose two contamination-immune BiGAN models, integrating elements of VAE and BiGAN to create a new AAE-based framework that effectively detects anomalies by learning the probability distribution of normal samples from contaminated datasets, significantly outperform", + "bbox": [ + 73, + 609, + 491, + 941 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "ing state-of-the-art methods in scenarios where training data is impure. Similar to the aforementioned AAE models, Du et al. use GANs to purify the original dataset, generating synthetic \"normal\" data to improve outlier detection accuracy. Continuing the advancements in AAE-based models, Yu et al. [107] introduce an Adversarial Contrastive Autoencoder (ACAE) for Multivariate Time Series (MTS) AD, which enhances feature representation through adversarial training and contrastive learning, demonstrating superior performance across multiple real-world datasets, further extending the application of AAE-based methods in robust AD.", + "bbox": [ + 501, + 609, + 921, + 773 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "3) Diffusion model-Based for Anomaly Detection: Diffusion models are a type of generative model that operate through two key phases: a fixed forward diffusion process and a learnable reverse diffusion process [108]. Mathematically, the forward process involves progressively adding Gaussian noise to the data $x_0$ , transforming it into pure noise $x_T$ over $T$ steps. This process can be described as:", + "bbox": [ + 503, + 777, + 921, + 882 + ], + "page_idx": 10 + }, + { + "type": "equation", + "text": "\n$$\nq \\left(x _ {t} \\mid x _ {t - 1}\\right) = \\mathcal {N} \\left(x _ {t}; \\sqrt {1 - \\beta_ {t}} x _ {t - 1}, \\beta_ {t} I\\right), \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 573, + 888, + 921, + 907 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "where $q(x_{t}|x_{t - 1})$ is the conditional probability distribution of $x_{t}$ given $x_{t - 1}$ , $\\beta_{t}$ is the noise variance at step $t$ , and $x_{t}$", + "bbox": [ + 503, + 914, + 921, + 945 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. 
X, XXX 2025", + "bbox": [ + 76, + 29, + 426, + 40 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 906, + 30, + 919, + 40 + ], + "page_idx": 10 + }, + { + "type": "table", + "img_path": "images/11c660ad378396e7d5a12ad6c18a5c71bc6887df1411ff0efc0d981bd187e2f7.jpg", + "table_caption": [ + "TABLE III AUTOENCODER-BASED MODELS IN ANOMALY DETECTION" + ], + "table_footnote": [], + "table_body": "
PaperTechniqueApproach TypeStrengthWeaknessData TypeYear
[104]VAE-based (VAE+LSTM)ReconstructionEffectively addresses imbalanced and high-dimensional challenges in industrial big data.Falls short in achieving the highest AUC and F1 scores compared to other methods.Industrial big data2020
[87]AAE-basedReconstructionFGPAA reduces information loss during feature extraction and constructs fault attention anomaly indicators using low-dimensional feature probability and reconstruction error.Runtime is approximately five times longer than SOM.Rotating machine fault simulator data2020
[98]AE-based (AE+CNN)ReconstructionThe Auto-AD method enables fully autonomous hyperspectral AD, automatically separating anomalies based on reconstruction errors without the need for manual tuning or additional processing.Lower AUC score compared to the GRX method on the Honghu dataset.Hyperspectral data2021
[99]AE-based (AE+CNN)ReconstructionMSNet offers an effective solution to handle multiscale anomaly shapes, providing greater flexibility without the need for threshold fine-tuning.Multiple convolutional encoder-decoder layers and enhanced training increase computational cost and training time.Hyperspectral data2021
[101]VAE-based (VAE+Transformer)ReconstructionSSR-AE leverages self-supervised learning to enhance normal data reconstruction and hinder abnormal data, optimizing mutual information for effective transformation and image reconstruction.Struggles with transformations, heavily relying on their effectiveness for AD.Image data2021
[97]AE-basedReconstructionMaintains geometric structure and local spatial coherence of hyperspectral images (HSI), reducing search space and execution time per pixel.High execution time for constructing the SuperGraph matrix with large datasets.Hyperspectral data2021
[51]AAE-based (AAE+Federated learning)ReconstructionFed-ANIDS demonstrates strong generalization, outperforms GAN-based models, and ensures privacy protection through federated learning.Computational overhead due to the federated learning framework, increasing training complexity and latency.Cybersecurity data2023
[100]AE-basedReconstructionApplicable for time series AD under data contamination.Assumes normal samples follow a Gaussian distribution, limiting applicability, and has higher computational complexity.Time series data2024
[106]AAE-basedReconstructionLearns the probability distribution of normal samples from contaminated datasets, achieving convergence and outperforming baseline models.Relies on the assumption that the contamination ratio is known, which may not always be accurate in practice.Medical image data2024
[86]AAE-basedReconstructionGenerates a clean dataset from contaminated data for AD, with linear scalability for larger datasets.Struggles with detection accuracy in datasets with multiple distribution patterns.Tabular data2024
[107]AAE-basedReconstructionExcels in learning high-level semantic features and capturing normal patterns of MTS with contrastive learning constraints, ensuring stability across parameter settings.Performance on all metrics for SMAP and PSM datasets is lower than baseline methods.Multivariate time series data2024
", + "bbox": [ + 81, + 102, + 915, + 604 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "represents the noisy data at step $t$ . As $t$ increases, the data becomes more corrupted by noise until it reaches a state of pure Gaussian noise at step $T$ .", + "bbox": [ + 73, + 632, + 491, + 678 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "The reverse process learns to gradually denoise the data, removing the added noise step by step. The model learns a parameterized distribution $p_{\\theta}(x_{t - 1}|x_t)$ to reverse the noise addition process, reconstructing the original data from the noisy data. This reverse process is trained to minimize the variational bound on the data likelihood, expressed as:", + "bbox": [ + 73, + 685, + 490, + 777 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\nL = \\mathbb {E} _ {q} \\left[ D _ {K L} \\left(q \\left(x _ {t - 1} \\mid x _ {t}, x _ {0}\\right) \\mid p _ {\\theta} \\left(x _ {t - 1} \\mid x _ {t}\\right)\\right) \\right]. \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 129, + 784, + 488, + 801 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "By progressively removing noise, diffusion models generate high-fidelity samples, first capturing coarse structures and then refining details in each step. In the context of AD, diffusion models are trained on normal data to learn the underlying data distribution through an iterative noise-removal process. Similar to other reconstruction-based methods, anomalies can be identified by evaluating the reconstruction error, where a higher error indicates that the data deviates from the learned normal patterns.", + "bbox": [ + 73, + 808, + 491, + 946 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Diffusion models stand out from GANs and VAEs in several key ways. They avoid common issues such as mode collapse in GANs, where only a subset of the data distribution is captured, leading to reduced diversity. Diffusion models also overcome the blurriness associated with VAEs, which often results from pixel-based loss and a smaller latent space. By iteratively denoising data, diffusion models maintain both high fidelity and diversity in their outputs.", + "bbox": [ + 501, + 631, + 921, + 753 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "While diffusion models are slower in generating samples due to their iterative nature, their ability to accurately reconstruct data and cover the full range of the training dataset makes them particularly well-suited for AD [109]. In AD, where precision is critical, diffusion models excel by generating detailed and high-quality samples, enabling them to identify subtle deviations from normal patterns with greater accuracy than other generative models. Several works have leveraged the advantages of diffusion models in ADn. For example, Zhang et al. [110] utilize the high-quality and diverse image generation capabilities of diffusion models to enhance reconstruction quality in DiffAD, addressing the limitations of", + "bbox": [ + 501, + 763, + 921, + 946 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. X, XXX 2025", + "bbox": [ + 76, + 29, + 426, + 41 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 906, + 30, + 919, + 40 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "traditional methods by introducing noisy condition embedding and interpolated channels. Similarly, Li et al. 
[111] apply a diffusion model to reconstruct normal data distributions and integrate an auxiliary learning module with pretext tasks to better distinguish between normal and abnormal data. Expanding on these ideas, Zeng et al. [112] improve denoising diffusion probabilistic models (DDPMs) for radio AD by incorporating an AE to learn the distribution of normal signals and their power spectral density (PSD), using reconstruction error to identify anomalies. Li et al. [113] present a Controlled Graph Neural Network (ConGNN) approach based on DDPMs to address the challenge of limited labeled data. Li et al. [114] further explore diffusion models in vehicle trajectory AD, employing decoupled Transformer-based encoders to capture temporal dependencies and spatial interactions among vehicles, significantly improving AUC and F1 scores on real-world and synthetic datasets. Similarly, Pei et al. [115] establish the two-stage diffusion model (TSDM) to mitigate the influences of anomalies in smart grids, where the first stage is a diffusion-based AD component. In multi-class AD, He et al. [116] propose DiAD, a framework that enhances reconstruction accuracy through a combination of a semantic-guided network, spatial-aware feature fusion, and a pre-trained feature extractor to generate anomaly maps.", "bbox": [73, 69, 491, 433], "page_idx": 12 }, { "type": "text", "text": "B. Deep learning methods for Anomaly Detection based on Prediction", "text_level": 1, "bbox": [73, 444, 491, 473], "page_idx": 12 }, { "type": "text", "text": "Prediction-based AD methods operate by forecasting future values or estimating missing attributes and comparing these predictions to the actual observed values. Significant deviations indicate potential anomalies, as the data departs from the learned normal patterns. By learning the underlying structures in the data, whether time dependencies or more general interactions between variables, these methods can predict expected outcomes and flag deviations from them, which makes them applicable across a wide range of data types and contexts. In this section, we explore three main approaches for prediction-based AD: Recurrent Neural Networks (RNNs), attention mechanisms, and Graph Neural Networks (GNNs), all of which have demonstrated efficacy in capturing intricate patterns and relationships, whether spatial, temporal, or graph-based, and thereby provide reliable detection of unexpected behaviors.", "bbox": [73, 477, 493, 883], "page_idx": 12 }, { "type": "text", "text": "1) RNN-based Anomaly Detection: Recurrent Neural Networks (RNNs) [117] are a special type of neural network designed to process sequential data by capturing dependencies between elements in a sequence. 
Unlike standard neural", + "bbox": [ + 73, + 883, + 491, + 946 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "networks, RNNs incorporate a state vector $s_t$ in the hidden layer, allowing them to retain information from previous steps and model sequential patterns. This capability makes them effective in various applications where data has an inherent order, such as event logs, system monitoring, and structured sequences in cybersecurity or industrial processes. For an input $x_t$ at time $t$ , the update of the state value $s_t$ and hidden layer output $h_t$ in RNNs can be represented as", + "bbox": [ + 501, + 68, + 921, + 189 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {s} _ {t} = \\sigma \\left(\\boldsymbol {W} ^ {x} \\boldsymbol {x} _ {t} + \\boldsymbol {W} ^ {s} \\boldsymbol {s} _ {t - 1} + \\boldsymbol {b} ^ {s}\\right) \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 599, + 195, + 919, + 219 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {h} _ {t} = \\operatorname {softmax} \\left(\\boldsymbol {W} ^ {h} \\boldsymbol {s} _ {t} + \\boldsymbol {b} ^ {h}\\right),\n$$\n", + "text_format": "latex", + "bbox": [ + 599, + 215, + 790, + 232 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "where $\\sigma(\\cdot)$ is the sigmoid activation function, $W^x$ , $W^s$ , and $W^h$ represent the network weights, and $b^s$ and $b^h$ are the network biases. By maintaining a recurrent state, RNNs can effectively capture dependencies across different steps within a sequence, making them well-suited for tasks involving ordered data.", + "bbox": [ + 501, + 238, + 921, + 314 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "However, RNNs face the problem of exploding or vanishing gradients when dealing with long sequences. Long Short-Term Memory networks (LSTMs) [118], a specialized type of RNN, were introduced to address these issues. Specifically, LSTMs replace the hidden layer of RNNs with an LSTM block consisting of input, output, and forget gates. 
The inference process of LSTM at time $t$ is given by", + "bbox": [ + 501, + 316, + 921, + 422 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {f} _ {t} = \\sigma \\left(\\boldsymbol {W} ^ {x f} \\boldsymbol {x} _ {t} + \\boldsymbol {W} ^ {h f} \\boldsymbol {h} _ {t - 1} + \\boldsymbol {b} ^ {f}\\right)\n$$\n", + "text_format": "latex", + "bbox": [ + 583, + 428, + 828, + 446 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {i} _ {t} = \\sigma \\left(\\boldsymbol {W} ^ {x i} \\boldsymbol {x} _ {t} + \\boldsymbol {W} ^ {h i} \\boldsymbol {h} _ {t - 1} + \\boldsymbol {b} ^ {i}\\right)\n$$\n", + "text_format": "latex", + "bbox": [ + 584, + 448, + 820, + 467 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\n\\tilde {\\boldsymbol {c}} _ {t} = \\tanh \\left(\\boldsymbol {W} ^ {x \\tilde {c}} \\boldsymbol {x} _ {t} + \\boldsymbol {W} ^ {h \\tilde {c}} \\boldsymbol {h} _ {t - 1} + \\boldsymbol {b} ^ {\\tilde {c}}\\right) \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 584, + 467, + 919, + 492 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {c} _ {t} = \\boldsymbol {f} _ {t} \\boldsymbol {c} _ {t - 1} + \\boldsymbol {i} _ {t} \\tilde {\\boldsymbol {c}} _ {t}\n$$\n", + "text_format": "latex", + "bbox": [ + 584, + 489, + 714, + 503 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {o} _ {t} = \\sigma \\left(\\boldsymbol {W} ^ {x o} \\boldsymbol {x} _ {t} + \\boldsymbol {W} ^ {h o} \\boldsymbol {h} _ {t - 1} + \\boldsymbol {b} ^ {o}\\right)\n$$\n", + "text_format": "latex", + "bbox": [ + 583, + 507, + 825, + 525 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {h} _ {t} = \\boldsymbol {o} _ {t} \\tanh \\left(\\boldsymbol {c} _ {t}\\right),\n$$\n", + "text_format": "latex", + "bbox": [ + 581, + 527, + 705, + 542 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "where $f_{t}$ , $i_{t}$ , and $o_{t}$ are the forget, input and output gate weights, respectively. $c_{t}$ represents the cell state of LSTM, and $\\tanh(\\cdot)$ is the hyperbolic tangent activation function. By controlling the weights of the forget, input, and output gates, LSTM determines the importance of historical time series information and the current input on the current output, thus effectively mitigating issues of gradient vanishing and allowing robust modeling of complex sequences. Reference [119] provides comprehensive evidence of LSTM's effectiveness in AD across various technical systems, demonstrating its superiority in learning complex temporal behaviors and accurately identifying anomalies.", + "bbox": [ + 501, + 547, + 921, + 731 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "The Gated Recurrent Unit (GRU) [120] is a simplified version of LSTM that only includes an update gate and a reset gate and uses the hidden state alone to represent both short-term and long-term information. These different types of RNNs can be used in prediction-based AD tasks, with the specific detection and inference method illustrated in Fig. 5. RNNs, LSTMs, and GRUs take time series data from $t - w$ to $t - 1$ as input, and their pre-trained neural networks use these temporally ordered data to predict the single-step or multi-step future values of the univariate or multivariate time series. 
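To make this pipeline concrete, the following is a minimal PyTorch sketch of the forecast-and-threshold scheme of Fig. 5; it is an illustration rather than code from any surveyed work, and the window length $w$ , hidden size, and threshold $\tau$ are assumptions to be tuned per dataset.

```python
# Minimal sketch (illustrative, not from a surveyed paper): prediction-based
# AD with an LSTM forecaster, following the sliding-window scheme of Fig. 5.
import torch
import torch.nn as nn

class LSTMForecaster(nn.Module):
    def __init__(self, n_features: int, hidden: int = 64):
        super().__init__()
        self.lstm = nn.LSTM(n_features, hidden, batch_first=True)
        self.head = nn.Linear(hidden, n_features)  # predict the next step

    def forward(self, x):             # x: (batch, w, n_features)
        out, _ = self.lstm(x)         # hidden state at every step
        return self.head(out[:, -1])  # forecast x_t from x_{t-w}..x_{t-1}

def anomaly_flags(model, windows, targets, tau):
    """Flag a window as anomalous when its prediction error exceeds tau."""
    with torch.no_grad():
        err = (model(windows) - targets).abs().mean(dim=-1)  # per-window error
    return err > tau  # boolean mask; True marks detected anomalies

# Train with MSE on normal data only, then calibrate tau on held-out normal
# windows (e.g., a high quantile of their prediction errors).
```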
If the difference between the actual and predicted values is below a threshold, no anomaly is detected; if the difference exceeds the threshold, an anomaly is detected and the spatiotemporal location of the anomaly is identified.", + "bbox": [ + 501, + 732, + 921, + 944 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. X, XXX 2025", + "bbox": [ + 76, + 29, + 426, + 41 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 906, + 30, + 919, + 40 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/99343b86e303b16ed72ac102346e1a4f94400e20ddc1bebcd295923286e32817.jpg", + "table_caption": [ + "TABLE IV DIFFUSION-BASED MODELS IN ANOMALY DETECTION" + ], + "table_footnote": [], + "table_body": "
<html><body><table><tr><td>Paper</td><td>Technique</td><td>Approach Type</td><td>Strength</td><td>Weakness</td><td>Data Type</td><td>Year</td></tr><tr><td>[110]</td><td>Diffusion</td><td>Reconstruction</td><td>The latent diffusion model (LDM) used in this method achieves state-of-the-art performance in surface AD by generating high-quality, semantically correct reconstructions, effectively avoiding overfitting to anomalies.</td><td>It is less suitable for real-time applications or environments with limited computational resources.</td><td>Image data</td><td>2023</td></tr><tr><td>[112]</td><td>Diffusion+VAE</td><td>Reconstruction</td><td>The AE-DDPMs algorithm effectively improves stability and reduces computational costs in radio AD, outperforming GAN-based methods in complex electromagnetic environments.</td><td>The anomalies in the experimental data are artificially generated, rather than originating from real-world conditions, which may limit the model's applicability to genuine, real-world scenarios.</td><td>Radio signal data</td><td>2023</td></tr><tr><td>[113]</td><td>Diffusion+GNN</td><td>Prediction</td><td>ConGNN effectively addresses the issue of limited labeled data by generating augmented graph data using a graph-specific diffusion model.</td><td>The reliance on graph-specific augmentation might not generalize well to other types of data, potentially limiting its applicability beyond graph-based AD.</td><td>Image data</td><td>2023</td></tr><tr><td>[111]</td><td>Diffusion+VAE</td><td>Hybrid</td><td>SDAD effectively enhances AD by combining self-supervised learning for discriminative data representation with denoising diffusion.</td><td>The generation of pseudo anomalies relies solely on standard Gaussian sampling, which may not fully capture the complexity of real anomalies, limiting the model's ability to accurately simulate genuine abnormal data.</td><td>Structured data</td><td>2024</td></tr><tr><td>[114]</td><td>Diffusion+Transformer</td><td>Hybrid</td><td>DiffTAD effectively models temporal dependencies and spatial interactions in vehicle trajectories through diffusion models, significantly improving AD accuracy and robustness to noise.</td><td>The anomalies are primarily evaluated on synthetic datasets, which may not fully reflect the complexity and diversity of real-world trajectory data.</td><td>Vehicle trajectory data</td><td>2024</td></tr></table></body></html>
", + "bbox": [ + 96, + 103, + 903, + 422 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/776e31311c1c4c6521d678b5659cbccf97515cff7d507f59b7a11d83bd281e30.jpg", + "image_caption": [ + "Fig. 5. RNN-based application example for time series data anomaly detection: (a) RNN-based, (b) LSTM-based, (c) GRU-based." + ], + "image_footnote": [], + "bbox": [ + 76, + 426, + 921, + 554 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Current RNN-based AD primarily focuses on improving RNN algorithms tailored to AD tasks and integrating RNN with other methods for AD. The method in [121] employs a pruning algorithm to reduce the number of false data points, enabling the LSTM-based AD approach to better address the challenges posed by the extremely uneven distribution of railway traffic data. LSTM combined with AE [122], VAE [123], and Singular Value Decomposition (SVD) [124] has also been used to identify anomalies in Controller Area Networks (CANs) [125], electrocardiograms, and Internet monitoring data. GANs based on adversarial learning have also been integrated into the time series learning of LSTM, achieving very high performance in scenarios with few features [95], extremely imbalanced training sets, and noise interference [96]. CNN is also integrated into LSTM in a serial [126], parallel [127], or as a foundational layer [128] to better extract the spatiotemporal correlations of multidimensional time series, thereby enhancing the performance of AD. GRUs, compared to LSTMs, have a more streamlined architecture, resulting in lower computational complexity during training and execution of AD tasks, and they tend to perform better on certain less complex sequential data. For instance, GRUs enhance interpretability by uncovering latent correlations in", + "bbox": [ + 73, + 595, + 493, + 945 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "multivariate time series data from industrial control system sensors [129]. Similar to LSTMs, GRUs can also be combined with AEs [130] or VAEs [25] in an encoder-decoder architecture to mitigate the effects of noise and anomalies, thereby improving the accuracy of AD.", + "bbox": [ + 501, + 595, + 919, + 670 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "2) Attention-based Anomaly Detection: The attention mechanism was initially applied in machine translation [131], with its core idea being to enable the neural network to focus on the relevant parts of the input values. While attention-based methods have shown great promise in time series AD, their applications are not limited to temporal data. These methods can effectively capture dependencies in various types of data, including spatial, spatiotemporal, and multimodal datasets. This flexibility broadens their use cases across different AD tasks. Compared to RNN-based approaches, they are better suited for long or complex sequences because attention can compute dependencies between all positions in the sequence simultaneously, while RNNs process sequences sequentially, step by step.", + "bbox": [ + 501, + 672, + 921, + 883 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Figure 6 illustrates a typical attention-based model for AD. Among attention-based methods, the self-attention mechanism is particularly effective in capturing global dependencies across various types of sequential data, including temporal,", + "bbox": [ + 503, + 883, + 921, + 946 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "IEEE INTERNET OF THINGS JOURNAL, VOL. 
X, NO. X, XXX 2025", + "bbox": [ + 76, + 29, + 426, + 41 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 906, + 30, + 919, + 40 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/019b206e929dfe093eeec77a1d42acc96182676e7e7c79b8a51a4f5bc4ca29c2.jpg", + "image_caption": [ + "Fig. 6. Attention-based model for anomaly detection. The model first embeds sequential data using input embedding and positional encoding to preserve temporal dependencies. The multi-head attention mechanism captures long-range dependencies by processing interactions between all time steps. The feedforward layer then refines feature representations, and a dense interpolation layer enhances anomaly-related features before passing them to a fully connected network (FNN) for final AD." + ], + "image_footnote": [], + "bbox": [ + 119, + 65, + 867, + 210 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "spatial, and spatiotemporal inputs. For an input dataset $\\mathbf{X} = [x_{1}, x_{2}, \\dots, x_{t}]$ , the queries, keys, and values are defined as: $Q = X W_{Q}$ , $K = X W_{K}$ , and $V = X W_{V}$ , where $W_{Q}$ , $W_{K}$ , and $W_{V}$ are trainable weight matrices. The attention weights are then computed based on $Q$ , $K$ , and $V$ as", + "bbox": [ + 73, + 285, + 491, + 362 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\\alpha_ {i j} = \\frac {\\exp \\left(\\boldsymbol {Q} _ {i} \\boldsymbol {K} _ {j} ^ {\\top} / \\sqrt {\\boldsymbol {d} _ {k}}\\right)}{\\sum_ {j = 1} ^ {T} \\exp \\left(\\boldsymbol {Q} _ {i} \\boldsymbol {K} _ {j} ^ {\\top} / \\sqrt {\\boldsymbol {d} _ {k}}\\right)}, \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 166, + 366, + 490, + 407 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "where $d_k$ is the dimension of the keys. Finally, the output of the self-attention-based neural network, which takes into account the importance of each input value, is given by Attention $(Q, K, V) = \\alpha V$ .", + "bbox": [ + 73, + 411, + 491, + 473 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "To enable the model to capture features of various patterns, multi-head attention is also well-suited for AD. The calculation of multiple heads is expressed as", + "bbox": [ + 73, + 477, + 490, + 523 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\\operatorname {Multihead} \\left(\\boldsymbol {Q}, \\boldsymbol {K}, \\boldsymbol {V}\\right) = \\operatorname {Concat} \\left(\\operatorname {head} _ {1}, \\dots , \\operatorname {head} _ {h}\\right) \\boldsymbol {W} _ {O}, \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 89, + 530, + 488, + 561 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "where each head is computed as $\\mathrm{head}_i =$ Attention $(\\mathbf{Q}\\mathbf{W}_{Q_i},\\mathbf{K}\\mathbf{W}_{K_i},\\mathbf{V}\\mathbf{W}_{V_i})$ . Here, $W_{Q_i}$ , $W_{K_i}$ , and $W_{V_i}$ are trainable parameters for different heads, and $W_{O}$ is the linear transformation matrix for the output. $\\operatorname{Concat}(\\mathrm{head}_1, \\dots, \\mathrm{head}_h)$ concatenates the outputs of all attention heads along the feature dimension. Attention-based methods can effectively capture long-term dependencies, improve computational efficiency, and enhance the interpretability of AD through visualized attention weight values. 
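As a concrete illustration, the NumPy sketch below implements the scaled dot-product self-attention of Eq. (8) and exposes the weight matrix $\alpha$ that such visualizations inspect; the shapes and random initialization are illustrative assumptions rather than the configuration of any surveyed model.

```python
# Minimal NumPy sketch of the scaled dot-product self-attention of Eq. (8).
import numpy as np

def softmax(z, axis=-1):
    z = z - z.max(axis=axis, keepdims=True)  # subtract max for stability
    e = np.exp(z)
    return e / e.sum(axis=axis, keepdims=True)

def self_attention(X, W_Q, W_K, W_V):
    """X: (T, d_model). Returns the attended output and the weights alpha."""
    Q, K, V = X @ W_Q, X @ W_K, X @ W_V
    d_k = K.shape[-1]
    alpha = softmax(Q @ K.T / np.sqrt(d_k))  # (T, T) weights, as in Eq. (8)
    return alpha @ V, alpha

# Toy usage: the (T, T) matrix alpha is what attention-weight-based AD inspects.
rng = np.random.default_rng(0)
T, d = 32, 16
X = rng.normal(size=(T, d))
W_Q, W_K, W_V = (rng.normal(size=(d, d)) * d ** -0.5 for _ in range(3))
out, alpha = self_attention(X, W_Q, W_K, W_V)
```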
When applied to AD, differences in the distribution of attention weights between normal and anomalous time series can serve as the basis for AD.", + "bbox": [ + 73, + 561, + 491, + 742 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "In the field of AD, particularly for time series data, there has been a growing number of studies proposing deep learning methods based on attention mechanisms. Autoencoders that combine convolution, LSTM, and self-attention mechanisms can better extract complex features from multivariate time series data and robustly detect anomalies in high noise conditions [132]. The Transformer, as a well-known attention-based model, has demonstrated superior performance in unsupervised prediction-based time series AD compared to LSTM, as it can learn the dynamic patterns of sequential data through self-attention mechanisms [133]. The Transformer-based AD utilizes attention-based sequence encoders for rapid inference, achieving an F1 score improvement of up to $17\\%$", + "bbox": [ + 73, + 748, + 491, + 946 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "on public datasets and reducing training time by as much as $99\\%$ compared to the baseline [134]. Despite its outstanding capabilities, the Transformer still faces certain bottlenecks in AD. Attention-based methods are prone to overfitting when data is insufficient. The method in [92] seamlessly integrates contrastive learning and GAN into the Transformer, utilizing data augmentation techniques and geometric distribution masking to expand the training data, thereby enhancing data diversity and improving accuracy by $9.28\\%$ .", + "bbox": [ + 501, + 285, + 921, + 422 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Attention mechanisms are also frequently applied in graph neural networks to jointly detect anomalies in time series data. Reference [135] proposes a novel efficient Transformer model based on graph learning methods, employing two-stage adversarial training to train the AD model and utilizing prototypical networks to apply the model to anomaly classification. A contrastive time-frequency reconstruction network for unsupervised AD is used for AD and localization [136], where attention mechanisms and graph convolutional networks update the feature information of each time point, combining points with similar feature relationships to dilute the influence of anomalous points on normal points. Reference [137] models the correlations between temporal variables using graph convolutional networks, while also using an attention-based reconstruction model to output the importance of time series data within each time window, achieving an average AD F1 score exceeding 0.96. For multimodal data, a multimodal graph attention network (M-GAT) and temporal convolutional networks are used to capture spatial-temporal correlations in multimodal time series and correlations between modalities [138], ultimately outputting anomaly scores through reconstruction or prediction. More details about the application of GNNs in AD will be elaborated in the next subsection.", + "bbox": [ + 501, + 426, + 921, + 773 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "In addition to GNNs, CNNs can also incorporate attention mechanisms to enhance various metrics of AD. Reference [139] effectively captures the local features of subsequences by leveraging the locality of CNNs and combining it with positional embeddings. At the same time, Zhu et al. 
[139] employ attention mechanisms to extract global features from the entire time series, thereby enhancing the effectiveness and potential of detection. Many works have also introduced LSTM to extract temporal correlations in time series data based on CNN models with attention mechanisms. For example, Sun et al. [140] employ a sequential approach where 1D convolution is", + "bbox": [ + 501, + 777, + 921, + 946 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. X, XXX 2025", + "bbox": [ + 76, + 29, + 426, + 41 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 906, + 30, + 919, + 40 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "first used to extract abstract features of the signal values at each time step, which are then input into a bidirectional long short-term memory network (Bi-LSTM), ultimately combining with attention mechanisms to make the model focus on locally important time steps. Meanwhile, Le et al. [141] integrate convolutional layers, LSTM layers, and self-attention layers into an autoencoder architecture to better extract complex features from multivariate time series. Similarly, Pei et al. [126] employ an additional SVM to classify the attention weights based on a CNN-LSTM model with attention mechanisms to determine whether cyber-attacks have occurred in energy systems. The input data are the multimodal measurements from the deployed sensors.", + "bbox": [ + 73, + 69, + 491, + 265 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "3) GNN-based Anomaly Detection: Graph Neural Networks (GNNs) have gained increasing attention in AD tasks, as many types of data can be naturally represented as graph structures [142]. Wu et al. [143] have demonstrated the effectiveness of GNNs in identifying anomalies within complex graph-structured data environments. As neural network models specifically designed to handle graph-structured data, GNNs define nodes, edges, and graphs, where nodes represent individual elements in the dataset, such as data points in a sequence, sensor readings in multivariate data, or entities in relational datasets, denoted as the set $V$ . Edges capture the relationships or dependencies between these elements, denoted as the set $E$ , and can represent temporal correlations, spatial dependencies, or more abstract relational connections depending on the context. The graph, represented as $G = (V, E)$ , captures the overall structure formed by nodes and edges. The primary operations in GNN training are message passing and aggregation, which are used to update and learn node features. Specifically, during message passing, each node receives information from its neighboring nodes and updates its own state. For a node $v$ , the message passing formula is given as", + "bbox": [ + 75, + 268, + 491, + 602 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {m} _ {v} ^ {(k)} = \\sum_ {u \\in \\mathcal {N} (v)} \\operatorname {MSG} \\left(\\boldsymbol {h} _ {u} ^ {(k - 1)}, \\boldsymbol {h} _ {v} ^ {(k - 1)}, \\boldsymbol {e} _ {u v}\\right), \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 129, + 606, + 490, + 643 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "where $\\mathcal{N}(v)$ denotes the set of neighboring nodes of $v$ , $h_u$ and $h_v$ are the features of nodes $u$ and $v$ at layer $k - 1$ , and $e_{uv}$ represents the edge features. 
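As an illustration, the sketch below carries out one such round of message passing, using a common GCN-style simplification in which the message depends only on the neighbor features $h_u^{(k-1)}$ (edge features omitted) and the update, formalized in the next equation, is a linear map followed by a ReLU; these choices are expository assumptions, not the design of any surveyed model.

```python
# Minimal sketch of one message-passing round: neighbor messages as in
# Eq. (10), then the node-state update formalized just below.
import numpy as np

def message_passing_step(H, adj, W_msg, W_self, b):
    """H: (n_nodes, d_in) features h^{(k-1)}; adj: (n_nodes, n_nodes) 0/1.
    Returns h^{(k)} of shape (n_nodes, d_out)."""
    M = adj @ (H @ W_msg)             # m_v = sum over u in N(v) of MSG(h_u)
    return np.maximum(H @ W_self + M + b, 0.0)  # UPDATE(h_v, m_v) with ReLU

# Stacking several such rounds yields node embeddings from which anomaly
# scores can be derived, e.g., via a prediction head or a reconstructor.
```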
Subsequently, the received messages are aggregated with the current node state, and the node features are updated as", + "bbox": [ + 73, + 648, + 491, + 724 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {h} _ {v} ^ {(k)} = \\operatorname {UPDATE} \\left(\\boldsymbol {h} _ {v} ^ {(k - 1)}, \\boldsymbol {m} _ {v} ^ {(k)}\\right), \\tag {11}\n$$\n", + "text_format": "latex", + "bbox": [ + 163, + 729, + 488, + 756 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "where $\\operatorname{UPDATE}(\\cdot, \\cdot)$ is the update function.", + "bbox": [ + 73, + 760, + 372, + 776 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "As illustrated in Fig. 7, which uses time series data as an example, GNNs treat each variable in the multivariate time series as a node to capture complex relationships between different dimensions. While the primary focus here is on the predictive capabilities of GNNs, it is worth noting that they are also effective in reconstruction-based AD. The final decision on whether the input sequence is anomalous is primarily based on prediction errors or graph structure differences, with reconstruction errors serving as a supplementary indicator. GNN-based AD methods excel at modeling complex dependencies between time steps or sensors, offering flexibility to handle", + "bbox": [ + 73, + 777, + 491, + 946 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/3330583da44f04aadda892ec09bf36e2ea653e3123da2117b4ea223ff767ce02.jpg", + "image_caption": [ + "Fig. 7. GNN-based method for anomaly detection with time series data. Time series data is embedded into a graph structure, where a spatial-temporal GNN extracts dependencies. The reconstruction module then estimates the original data. Anomalies are detected based on graph relational discrepancies (differences in predicted graph structure) and prediction discrepancies (differences between reconstructed and actual time series)." + ], + "image_footnote": [], + "bbox": [ + 506, + 61, + 919, + 277 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "both static and dynamic relationships across diverse time series structures. However, they still face challenges such as high computational complexity on large-scale graphs and difficulties in constructing optimal edge and graph configurations [144].", + "bbox": [ + 501, + 378, + 919, + 453 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "In prediction-based GNN for AD, GDN [145] is a representative work that combines a structure learning approach with GNN, additionally using attention weights to predict time series values and detect anomalies based on the predictions. Similar methods include GTA [146] and CST-GL [147]. Furthermore, Liu et al. [148] propose a GNN-based contrastive learning model that generates prediction scores from high-dimensional attributes and local structures to detect anomalies, outperforming state-of-the-art methods on seven benchmark datasets. Beyond prediction-based methods, there are also reconstruction-based GNN approaches. For example, MTAD-GAT [149] employs a graph attention network as a spatiotemporal encoder to learn dependencies across variables and time, reconstructing the time series with a backbone reconstructor and identifying anomalies based on reconstruction errors. Similar techniques include VGCRN [150] and FuSAGNet [151].", + "bbox": [ + 501, + 454, + 921, + 710 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "C. 
Deep learning methods for Anomaly Detection based on Hybrid Method", + "bbox": [ + 503, + 729, + 919, + 758 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "In AD, reconstruction-based and prediction-based methods offer distinct but complementary approaches to identifying anomalies. Both methods rely on the discrepancy between the model's output and the actual input data as an indicator of abnormality. However, they diverge in how they handle data and their areas of application. Reconstruction-based methods focus on learning the underlying distribution of normal data. Once trained, the model attempts to recreate the input data. The reconstruction error, measured as the difference between the original data and its reconstruction, serves as a key indicator of anomalies. A high reconstruction error suggests that the data deviates from the normal patterns learned by the", + "bbox": [ + 501, + 763, + 921, + 946 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. X, XXX 2025", + "bbox": [ + 76, + 29, + 426, + 41 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 906, + 30, + 919, + 40 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "model. This approach is particularly effective in cases where understanding the full structure or distribution of the data is crucial, such as in image-based AD or other high-dimensional datasets. In contrast, prediction-based methods focus on forecasting specific attributes or missing values from the data, rather than reconstructing the entire input. These methods typically predict future values or infer missing data points by leveraging known features. If the predicted values significantly deviate from the actual values, this signals a potential anomaly. Prediction-based methods are often more suited to feature-rich datasets, where predicting specific variables can help identify irregular patterns. For instance, in applications like fraud detection, predicting expected behaviors or transactions can reveal anomalies when the predicted outcomes differ from the observed ones. While both methods differ in their data processing approaches, they can be highly complementary. In many cases, combining reconstruction-based and prediction-based techniques within a hybrid framework allows for more robust AD. Reconstruction models capture the overall structure and patterns in the data, while prediction models focus on detecting deviations in specific variables or features. This combination can provide a more comprehensive solution for identifying anomalies in complex datasets across various domains. Tang et al. [152] utilize a U-Net module as the prediction module to perform future frame prediction, amplifying reconstruction errors for abnormal events, while another U-Net module is used as the reconstruction module to enhance predicted frames for normal events, thus improving the effectiveness of AD. Lv et al. [31] adopt a dilated convolution-based autoencoder to integrate prediction errors and reconstruction errors into the output anomaly scores, effectively improving the generalization capability of the detection model. Liu et al. [153] leverage a reconstruction model and a prediction model within an end-to-end semi-supervised AD framework to effectively capture inter-variable correlations and temporal dependencies in multivariate time series data from wind turbines. 
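Across these hybrid designs, the recurring pattern is an anomaly score that fuses the two error signals; a minimal sketch of such a fusion is given below, where the weighting $\lambda$ and the use of mean-squared errors are illustrative assumptions, with $\lambda$ tuned on validation data or learned jointly with the models.

```python
# Minimal sketch of a hybrid anomaly score that fuses reconstruction and
# prediction errors; lambda (lam) and the MSE choice are illustrative.
import numpy as np

def hybrid_score(x_window, x_next, reconstruct, predict, lam=0.5):
    """reconstruct/predict are trained callables (e.g., an AE and a forecaster).
    Returns a scalar anomaly score for one window and its next observation."""
    rec_err = np.mean((reconstruct(x_window) - x_window) ** 2)  # structure
    pred_err = np.mean((predict(x_window) - x_next) ** 2)       # dynamics
    return lam * rec_err + (1.0 - lam) * pred_err  # weighted fusion
```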
Additionally, by incorporating an auxiliary discriminator with adversarial training, the model can progressively improve performance using limited labeled data, enhancing the transition from unsupervised to supervised AD. Wei et al. [154] propose a hybrid deep-learning model combining LSTM and autoencoder for AD in indoor air quality data, where the LSTM captures long-term dependencies in time-series data and the autoencoder uses reconstruction loss to detect anomalies, effectively addressing both temporal correlations and reconstruction errors for improved detection accuracy.", + "bbox": [ + 73, + 68, + 491, + 765 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "D. Summary and Insights", + "text_level": 1, + "bbox": [ + 75, + 787, + 253, + 803 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "This section introduces three types of deep learning-based AD methods: reconstruction-based, prediction-based, and hybrid approaches. Reconstruction-based methods are particularly effective in handling high-dimensional and unsupervised data by learning intrinsic patterns and identifying deviations through reconstruction errors. Prediction-based methods excel at modeling temporal dependencies in time-series data, enabling the detection of unexpected patterns in dynamic environments. Hybrid approaches combine these strengths", + "bbox": [ + 73, + 808, + 491, + 946 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "to address complex scenarios where multiple anomaly types coexist. Notably, these methods demonstrate the power of deep learning in capturing intricate patterns and dependencies that traditional methods often miss, making them indispensable for tackling diverse and challenging AD tasks.", + "bbox": [ + 501, + 68, + 921, + 146 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "V. INTEGRATE TRADITIONAL METHOD AND DEEP LEARNING METHOD", + "text_level": 1, + "bbox": [ + 535, + 161, + 890, + 191 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "In the field of AD, traditional methods and deep learning approaches each offer unique advantages. Traditional methods, such as clustering [155] and Support Vector Data Description [156], are often simpler, more interpretable, and computationally efficient. These methods excel in providing transparent decision-making processes, making them suitable for applications where model interpretability is crucial. On the other hand, deep learning methods, with their ability to model complex, high-dimensional data distributions, offer enhanced detection accuracy and adaptability, especially for large datasets and unstructured data like images and sequences.", + "bbox": [ + 501, + 195, + 921, + 361 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "The integration of traditional and deep learning methods aims to leverage the interpretability and simplicity of traditional methods with the robustness and flexibility of deep learning techniques. By combining these approaches, researchers seek to create hybrid models that maintain accuracy while offering insights into the underlying decision-making process, improving both detection power and model transparency.", + "bbox": [ + 501, + 362, + 923, + 484 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "A. Clustering method", + "text_level": 1, + "bbox": [ + 504, + 503, + 656, + 517 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Clustering models play a crucial role in unsupervised AD, particularly for textual data. 
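The basic recipe such systems build on is to score each point by its distance to the nearest cluster centroid, often in a feature space produced by a neural network; a minimal scikit-learn sketch follows, in which the cluster count $k$ and the flagging quantile are illustrative assumptions.

```python
# Minimal sketch of cluster-distance anomaly scoring on (deep) features.
import numpy as np
from sklearn.cluster import KMeans

def cluster_anomaly_scores(features, k=8):
    """features: (n_samples, d), e.g., embeddings from a pretrained network.
    Score = Euclidean distance to the nearest of the k centroids."""
    km = KMeans(n_clusters=k, n_init=10).fit(features)
    return km.transform(features).min(axis=1)  # (n_samples,)

# Toy usage: flag the most isolated 1% of points as anomalies.
X = np.random.default_rng(0).normal(size=(1000, 32))
scores = cluster_anomaly_scores(X)
flags = scores > np.quantile(scores, 0.99)
```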
These models group similar data points based on their proximity in feature space and identify anomalies as points that deviate from established clusters [157]. Common clustering techniques, such as k-means [158], Density-Based Spatial Clustering of Applications with Noise (DBSCAN) [159], and hierarchical clustering [160], work effectively for simpler datasets and offer the advantage of interpretability. By integrating clustering methods with deep learning, such as applying clustering post feature extraction by a neural network, it is possible to improve detection accuracy while maintaining an interpretable clustering structure. This hybrid approach is particularly useful in cases where data distribution varies, and flexible, context-aware AD is required. For instance, Li et al. [161] propose a method that extends fuzzy clustering with a reconstruction criterion and Particle Swarm Optimization (PSO) to detect anomalies in both amplitude and shape. This highlights how traditional clustering methods can benefit from optimization techniques to handle diverse anomaly types. Similarly, Markovitz et al. [162] introduce an innovative approach for AD in human actions by working directly on human pose graphs extracted from video sequences. By mapping these graphs to a latent space, clustering them, and applying a Dirichlet process-based mixture model, the method effectively leverages probabilistic modeling to enhance the robustness and flexibility of clustering for action recognition. In video AD, Qiu et al. [163] propose a convolution-enhanced self-attentive video auto-encoder", + "bbox": [ + 501, + 521, + 923, + 946 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. X, XXX 2025", + "bbox": [ + 76, + 29, + 426, + 40 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 906, + 30, + 919, + 40 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "integrated with a dual-scale clustering module based on the K-means algorithm. This approach effectively distinguishes normal and abnormal video data by enhancing feature representations and addressing the fuzzy boundaries between them. Additionally, Peng et al. [33] introduce a multivariate ELM-MI framework combined with a dynamic kernel selection method. By employing hierarchical clustering on unlabeled data to determine kernels, this method enables unsupervised online detection of various anomaly types, including point and group anomalies, while reducing computational costs and improving robustness. These studies collectively highlight the potential of hybrid approaches that integrate clustering with advanced techniques like deep learning, probabilistic modeling, or optimization frameworks. Such methods leverage the interpretability and simplicity of traditional clustering while addressing its limitations in handling complex data, offering a promising pathway for accurate and flexible AD.", + "bbox": [ + 73, + 69, + 491, + 325 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "B. Normalizing Flows", + "text_level": 1, + "bbox": [ + 75, + 338, + 230, + 353 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Normalizing Flows (NF) [164] offer a probabilistic framework for AD by estimating the probability distribution of data. Using a sequence of invertible transformations, NFs can model complex distributions, making them particularly effective for identifying anomalies as low-probability events. 
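Concretely, anomaly scoring with a flow reduces to evaluating the change-of-variables log-likelihood and thresholding it. The sketch below uses a single invertible affine transform for brevity; practical flows, including those discussed next, stack many richer invertible layers, so this parameterization is purely an illustrative assumption.

```python
# Minimal sketch of NF-style anomaly scoring via change of variables.
import numpy as np

class AffineFlow:
    """z = (x - mu) * exp(-log_sigma) maps data toward a standard normal base."""
    def __init__(self, mu, log_sigma):
        self.mu, self.log_sigma = mu, log_sigma  # arrays of shape (d,)

    def log_prob(self, x):
        z = (x - self.mu) * np.exp(-self.log_sigma)
        base = -0.5 * (z ** 2 + np.log(2 * np.pi)).sum(axis=-1)  # log N(z; 0, I)
        log_det = -self.log_sigma.sum()  # log |det dz/dx| of the affine map
        return base + log_det

# Anomaly rule: flag x when log_prob(x) falls below a threshold calibrated as
# a low quantile of log-likelihoods on held-out normal data.
```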
When integrated with deep learning models, such as CNNs or RNNs, NFs act as precise probabilistic estimators, complementing the feature extraction capabilities of deep networks. This hybrid framework enhances AD, particularly in high-dimensional or unstructured datasets.", + "bbox": [ + 73, + 356, + 491, + 505 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "For instance, Yu et al. [165] propose FastFlow, a 2D normalizing flow module integrated with deep feature extractors like ResNet and Vision Transformers. By effectively modeling feature distributions and capturing both local and global relationships, FastFlow achieves state-of-the-art performance, with a $99.4\\%$ AUC on the MVTec AD dataset, while maintaining high inference efficiency. Similarly, Cho et al. [166] introduce Implicit Two-path Autoencoder (ITAE), which reconstructs normal video patterns by implicitly modeling appearance and motion features through two encoders and a shared decoder. NF enhances ITAE by estimating the density of normal embeddings, enabling robust detection of out-of-distribution anomalies, with strong results across six surveillance benchmarks. For multivariate time series data, Zhou et al. [167] combine a graph structure learning model with entity-aware normalizing flows to capture interdependencies and evolving relations among entities. By estimating entity-specific densities and employing a clustering strategy for similar entities, the extended MTGFlow_cluster improves density estimation accuracy, demonstrating superior performance on six benchmark datasets. Further expanding on the use of graphs, Dai et al. [168] propose Graph-Augmented Normalizing Flow (GANF), which incorporates a Bayesian network to model causal relationships among time series. This approach factorizes joint probabilities into conditional probabilities, improving density estimation and enabling effective detection of anomalies in low-density regions, as well as identifying distribution drifts.", + "bbox": [ + 73, + 507, + 491, + 912 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "These studies collectively highlight the strengths of integrating Normalizing Flows with traditional and deep learning-", + "bbox": [ + 73, + 914, + 491, + 946 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "based methods. By combining the interpretability and precision of probabilistic models with the expressive power of deep networks or graph structures, these hybrid approaches address the challenges of complex data distributions, offering scalable and robust solutions for diverse AD tasks. This synergy underscores the potential of such methods to push the boundaries of accuracy and adaptability in real-world applications.", + "bbox": [ + 501, + 68, + 921, + 176 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "C. Support Vector Data Description", + "text_level": 1, + "bbox": [ + 504, + 194, + 754, + 209 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Support Vector Data Description (SVDD) [156] is a traditional machine learning method used to define a boundary around normal data points, effectively distinguishing them from anomalies. Unlike binary classification, SVDD is particularly effective for one-class classification tasks, where only normal data is available. This approach is computationally efficient and interpretable, as it provides a clear boundary between normal and abnormal points. 
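At inference time, the SVDD decision reduces to comparing the distance between a feature vector and the learned hypersphere center $c$ against the radius $R$ ; a minimal sketch of this scoring rule follows (training of $c$ , $R$ , and any feature encoder is omitted, and all names are illustrative).

```python
# Minimal sketch of (deep) SVDD scoring: distance to a hypersphere center.
import numpy as np

def svdd_scores(features, center):
    """Squared distance to the center; larger means more anomalous."""
    return ((features - center) ** 2).sum(axis=1)

def svdd_flags(features, center, radius):
    return svdd_scores(features, center) > radius ** 2  # outside the sphere

# In the deep variants discussed next, `features` are network embeddings and
# the center is often fixed to the mean embedding of normal training data.
```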
By integrating SVDD with deep learning, researchers can enhance the boundary definition based on high-dimensional features extracted by a neural network, resulting in a model that combines the boundary precision of SVDD with the feature richness of deep learning. This hybrid model is highly effective in scenarios where boundary clarity and interpretability are paramount, such as in industrial monitoring or fraud detection.", + "bbox": [ + 501, + 212, + 921, + 438 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "To improve latent representations, Zhou et al. [169] propose Deep SVDD-VAE, which jointly optimizes VAE and SVDD. The VAE reconstructs input data, and SVDD simultaneously defines a spherical boundary in the latent space, ensuring separability of normal and anomalous instances. This joint optimization significantly outperforms traditional AE-based methods, as shown on MNIST, CIFAR-10, and GTSRB datasets. For variable-length time series data, Ergen et al. [124] introduce an LSTM-based AD framework, where LSTM and SVDD are jointly optimized using modified objectives. This method extends seamlessly to GRU architectures, demonstrating strong performance across unsupervised, semisupervised, and supervised settings. Besides, Zhang et al. [170] propose Deep Structure Preservation SVDD (DSPSVDD), which simultaneously minimizes hypersphere volume and network reconstruction error. This dual objective ensures deep feature preservation and enhances AD performance, outperforming traditional SVDD models on datasets like MNIST and MVTec AD.", + "bbox": [ + 501, + 439, + 921, + 724 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "These studies highlight the strengths of combining SVDD with deep learning, where deep models enhance feature representation while SVDD ensures boundary precision. This hybrid framework effectively addresses limitations in both methods, offering a scalable and interpretable solution for complex AD tasks across diverse domains.", + "bbox": [ + 503, + 724, + 921, + 816 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "D. Summary and Insights", + "text_level": 1, + "bbox": [ + 504, + 835, + 684, + 849 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "This section explores the integration of traditional and deep learning methods for AD, highlighting how their complementary strengths can be combined. Traditional methods, known for their simplicity, interpretability, and computational efficiency, excel in scenarios where transparency is critical. In contrast, deep learning methods offer superior adaptability", + "bbox": [ + 501, + 854, + 921, + 946 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. X, XXX 2025", + "bbox": [ + 76, + 29, + 426, + 41 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 906, + 30, + 919, + 40 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "and accuracy, particularly for high-dimensional and unstructured data. By integrating these approaches, hybrid models can leverage the interpretability of traditional methods while retaining the robustness and flexibility of deep learning. This fusion not only enhances AD performance but also bridges the gap between accuracy and model transparency, making it a promising direction for future research.", + "bbox": [ + 73, + 69, + 491, + 175 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "VI. 
OPEN ISSUES AND FUTURE WORKS", + "text_level": 1, + "bbox": [ + 143, + 195, + 421, + 209 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "A. Data Collection", + "text_level": 1, + "bbox": [ + 73, + 215, + 209, + 229 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Data scarcity and class imbalance remain major challenges in AD. Since anomalies are rare, obtaining large labeled datasets is costly and time-consuming, especially when expert annotation is required. Supervised learning struggles due to the lack of abnormal samples, while the overwhelming presence of normal data biases models toward common patterns. This problem is particularly critical in cybersecurity, healthcare, and industrial monitoring, where undetected anomalies can have serious consequences.", + "bbox": [ + 73, + 236, + 491, + 372 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Several approaches mitigate these issues. Semi-supervised and unsupervised learning exploit normal data distributions to detect deviations without requiring labeled anomalies [171] [172]. Data augmentation, synthetic data generation, and oversampling improve data balance by increasing the number of anomalous examples, helping models generalize better [173] [174]. Despite these advancements, challenges remain. Semi-supervised methods struggle with subtle anomalies that closely resemble normal data. Augmentation techniques, often based on simple transformations, may fail to capture complex domain-specific variations. Similarly, synthetic data generation may not fully reflect real-world anomaly diversity, leading to models biased toward normal samples. Moreover, even with augmentation, models risk overfitting to the majority class, compromising anomaly detection performance. Ensuring that models remain sensitive to rare anomalies while maintaining accuracy on normal data remains an ongoing challenge. Future research may focus on refining self-supervised learning [175], improving the diversity of synthetic samples [176], and developing more adaptive anomaly detection frameworks to enhance robustness in real-world applications.", + "bbox": [ + 73, + 373, + 493, + 690 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "B. Computational Complexity", + "text_level": 1, + "bbox": [ + 73, + 713, + 282, + 728 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "In AD, computational complexity is a crucial factor, especially for systems operating in real-time environments or handling large-scale datasets. The efficiency of an algorithm directly impacts its feasibility in fields like industrial monitoring, cybersecurity, and autonomous systems, where swift detection is essential. Many advanced models, particularly deep learning approaches like autoencoders, GANs, and LSTMs, are computationally intensive due to their complex architectures and iterative learning processes. This often leads to trade-offs between detection accuracy and computational efficiency, with continuous efforts aimed at optimizing models to reduce computational demands without sacrificing performance.", + "bbox": [ + 73, + 732, + 491, + 912 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Moreover, AD models frequently require substantial memory resources, especially when dealing with high-dimensional", + "bbox": [ + 73, + 914, + 491, + 946 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "or streaming data, making memory usage a crucial consideration. 
Techniques like memory-efficient architectures, data compression, and sparse modeling are commonly used to address this issue. Real-time AD adds further complexity, as algorithms must process incoming data and make rapid decisions in applications like autonomous driving and fraud detection [177], where even minimal delays can have severe consequences. Achieving real-time performance typically involves optimizing data processing speeds and decision-making through lightweight models [178] [179] and parallel processing techniques, such as GPU acceleration [180]. However, balancing real-time detection capabilities with high accuracy remains challenging.", + "bbox": [ + 501, + 69, + 921, + 265 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "The tension between computational complexity and detection accuracy persists, as complex models often excel in detection but lack practical applicability for real-time or large-scale scenarios. Simpler models, though computationally efficient, may fail to detect nuanced anomalies. Hybrid models or multi-stage frameworks that deploy complex methods only as needed provide a potential solution. Additionally, future research may benefit from exploring distributed computing solutions, like cloud [181] or edge computing, to enhance real-time AD performance in resource-limited environments.", + "bbox": [ + 503, + 265, + 921, + 416 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "C. Explainability and Interpretability", + "text_level": 1, + "bbox": [ + 504, + 429, + 761, + 444 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Deep learning methods have greatly advanced AD by capturing complex patterns in high-dimensional data. However, they are often criticized as \"black-box\" models due to their lack of transparency, making it challenging to understand why certain data points are flagged as anomalies. For fields like healthcare, finance, or industrial monitoring, accurate detection alone is insufficient; stakeholders also need clear explanations to understand why a particular anomaly was detected. This lack of interpretability limits the practical deployment of deep learning models, as the inability to justify decisions reduces trust and hinders adoption in critical applications.", + "bbox": [ + 501, + 446, + 921, + 613 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "In fields like healthcare, where anomalies may be linked to medical diagnoses, or in finance, where fraud detection can carry legal implications, interpretability is essential. Transparent model decisions enable experts to validate results and make informed decisions. In safety-critical applications, such as autonomous driving or industrial equipment monitoring, understanding the rationale behind AD is vital for ensuring safety. One major challenge is balancing the trade-off between model interpretability and performance. Simpler models, like decision trees or linear regression, offer greater transparency but often lack the complexity needed to detect subtle anomalies in high-dimensional data. In contrast, deep learning models provide high accuracy but are harder to interpret.", + "bbox": [ + 503, + 613, + 921, + 808 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Ongoing research is exploring hybrid approaches, where interpretable models are combined with more complex ones, allowing for accurate AD with the added benefit of interpretability. 
For example, attention mechanisms [182] in neural networks can help highlight specific data regions influencing decisions, providing insights into the model's internal workings. Alternatively, tools like Local Interpretable Model-agnostic Explanations (LIME) and SHapley Additive exPlanations (SHAP) [2] can offer post-hoc explanations, improving", + "bbox": [ + 503, + 809, + 921, + 946 + ], + "page_idx": 18 + }, + { + "type": "header", + "text": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. X, XXX 2025", + "bbox": [ + 76, + 29, + 426, + 40 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 906, + 30, + 919, + 40 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "transparency without altering model structure. Future research could also focus on real-time explainability in time-sensitive applications, and incorporating domain knowledge or user feedback to enhance model interpretability.", + "bbox": [ + 73, + 69, + 491, + 130 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "D. Handling Diverse Types of Anomalies", + "text_level": 1, + "bbox": [ + 73, + 147, + 359, + 162 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "In real-world AD, multiple types of anomalies often coexist, adding complexity to the detection process. Beyond point anomalies, which are the simplest, other types like contextual and collective anomalies are common, especially in dynamic environments. For instance, in intelligent transportation systems, anomalies may include both isolated incidents (e.g., a single vehicle's sudden deceleration) and collective patterns (e.g., multiple vehicles simultaneously slowing down), each requiring different detection methods. Effectively capturing these varied anomaly types requires flexible models capable of adapting to different anomaly patterns without focusing on only one type.", + "bbox": [ + 73, + 166, + 491, + 347 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Continuous research is needed to develop models that can generalize across anomaly types, enhancing adaptability and balancing detection accuracy with model flexibility. Hybrid approaches, for instance, can integrate different methods to capture diverse anomalies more effectively. The challenge remains in achieving this versatility without sacrificing accuracy, as models must maintain strong performance across different contexts. Future work may also explore multi-modal models [183] that combine different types of data, further improving detection capabilities by drawing from diverse data sources. These directions aim to create AD systems that are both robust and adaptable, capable of handling the complex and mixed nature of real-world anomaly scenarios.", + "bbox": [ + 73, + 347, + 493, + 544 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "VII. CONCLUSION", + "text_level": 1, + "bbox": [ + 214, + 558, + 352, + 571 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "In this survey, we have provided a comprehensive overview of the recent advancements in AD with a primary focus on deep learning techniques from 2019 to 2024. By analyzing over 180 research papers from leading journals and conferences, we have explored how AD methods have evolved to address diverse challenges across various types of data. This survey categorizes and examines deep learning methods into reconstruction-based, prediction-based, and hybrid approaches, highlighting their strengths, limitations, and applications. 
Recognizing the simplicity, interpretability, and computational efficiency of traditional AD methods, we reviewed their integration with deep learning techniques. These hybrid approaches aim to leverage the strengths of both paradigms, enhancing robustness and efficiency in AD systems. This survey not only sheds light on the state-of-the-art techniques but also identifies gaps and opportunities for future research. By focusing on the latest trends and innovations, this work aims to inspire further exploration and advancements in the rapidly evolving field of AD.", + "bbox": [ + 73, + 577, + 493, + 864 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "REFERENCES", + "text_level": 1, + "bbox": [ + 233, + 878, + 331, + 891 + ], + "page_idx": 19 + }, + { + "type": "ref_text", + "text": "[1] L. Ruff, J. R. Kauffmann, R. A. Vandermeulen, G. Montavon, W. Samek, M. Kloft, T. G. Dietterich, and K.-R. Müller, “A unifying review of deep and shallow anomaly detection,” Proc. IEEE, vol. 109, no. 5, pp. 756–795, 2021.", + "bbox": [ + 86, + 897, + 491, + 945 + ], + "page_idx": 19 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[2] V. Vimbi, N. Shaffi, and M. Mahmud, \"Interpreting artificial intelligence models: a systematic review on the application of lime and shap in alzheimer's disease detection,\" Brain Informatics, vol. 11, no. 1, p. 10, 2024.", + "[3] F. Al-Turjman, H. Zahmatkesh, and R. Shahroze, “An overview of security and privacy in smart cities’ IoT communications,” Trans. Emerg. Telecommun. Technol., vol. 33, no. 3, p. e3677, 2022.", + "[4] Y. A. Qadri, A. Nauman, Y. B. Zikria, A. V. Vasilakos, and S. W. Kim, \"The future of healthcare internet of things: a survey of emerging technologies,\" IEEE Commun. Surv. Tutor., vol. 22, no. 2, pp. 1121-1167, 2020.", + "[5] M. Humayun, N. Jhanjhi, B. Hamid, and G. Ahmed, “Emerging smart logistics and transportation using IoT and blockchain,” IEEE Internet Things Mag., vol. 3, no. 2, pp. 58–62, 2020.", + "[6] S. H. Haji and S. Y. Ameen, \"Attack and anomaly detection in IoT networks using machine learning techniques: A review,\" Asian J. Res. Comput. Sci., vol. 9, no. 2, pp. 30-46, 2021.", + "[7] V. Mothukuri, P. Khare, R. M. Parizi, S. Pouriyeh, A. Dehghantanha, and G. Srivastava, \"Federated-learning-based anomaly detection for IoT security attacks,\" IEEE Internet Things J., vol. 9, no. 4, pp. 2545-2554, 2021.", + "[8] S. A. Al Mamun and J. Valimaki, “Anomaly detection and classification in cellular networks using automatic labeling technique for applying supervised learning,” Procedia Comput. Sci., vol. 140, pp. 186-195, 2018.", + "[9] M. E. Villa-Pérez, M. A. Alvarez-Carmona, O. Loyola-Gonzalez, M. A. Medina-Pérez, J. C. Velazco-Rossell, and K.-K. R. Choo, \"Semi-supervised anomaly detection algorithms: A comparative summary and future research directions,\" Knowledge-Based Systems, vol. 218, p. 106878, 2021.", + "[10] G. Michau and O. Fink, \"Unsupervised transfer learning for anomaly detection: Application to complementary operating condition transfer,\" Knowledge-Based Systems, vol. 216, p. 106816, 2021.", + "[11] Y. Liang, J. Zhang, S. Zhao, R. Wu, Y. Liu, and S. Pan, \"Omni-frequency channel-selection representations for unsupervised anomaly detection,\" IEEE Trans. Image Process., 2023.", + "[12] B. Siegel, \"Industrial anomaly detection: A comparison of unsupervised neural network architectures,\" IEEE Sens. Lett., vol. 4, no. 8, pp. 1-4, 2020.", + "[13] P. Bergmann, M. Fauser, D. Sattlegger, and C. 
Steger, \"Mvtec ad-a comprehensive real-world dataset for unsupervised anomaly detection,\" in Proc. IEEE/CVF Conf. Comput. Vis. Pattern Recognit., 2019, pp. 9592-9600.", + "[14] S. Schmidl, P. Wenig, and T. Papenbrock, \"Anomaly detection in time series: a comprehensive evaluation,\" Proc. VLDB Endow., vol. 15, no. 9, pp. 1779-1797, 2022.", + "[15] S. Zhai, Y. Cheng, W. Lu, and Z. Zhang, \"Deep structured energy based models for anomaly detection,\" in Int. Conf. Mach. Learn. (ICML). PMLR, 2016, pp. 1100-1109.", + "[16] H. Sarmadi and A. Karamodin, “A novel anomaly detection method based on adaptive mahalanobis-squared distance and one-class knn rule for structural health monitoring under environmental effects,” Mech. Syst. Signal Process., vol. 140, p. 106495, 2020.", + "[17] I. Syarif, A. Prugel-Bennett, and G. Wills, “Unsupervised clustering approach for network anomaly detection,” in Netw. Digit. Technol., Int. Conf., NDT 2012, Proc., Part I. Springer, 2012, pp. 135–145.", + "[18] D. Samariya and A. Thakkar, “A comprehensive survey of anomaly detection algorithms,” Ann. Data Sci., vol. 10, no. 3, pp. 829–850, 2023.", + "[19] G. Pang, C. Shen, L. Cao, and A. V. D. Hengel, “Deep learning for anomaly detection: A review,” ACM Comput. Surv., vol. 54, no. 2, pp. 1-38, 2021.", + "[20] L. Bergman, N. Cohen, and Y. Hoshen, \"Deep nearest neighbor anomaly detection,\" arXiv preprint arXiv:2002.10445, 2020.", + "[21] K. Leung and C. Leckie, \"Unsupervised anomaly detection in network intrusion detection using clusters,\" in Proc. 28th Australas. Conf. Comput. Sci., vol. 38, 2005, pp. 333-342.", + "[22] H. Ringberg, A. Soule, J. Rexford, and C. Diot, \"Sensitivity of pca for traffic anomaly detection,\" in Proc. 2007 ACM SIGMETRICS Int. Conf. Meas. Model. Comput. Syst., 2007, pp. 109-120.", + "[23] D. Kwon, H. Kim, J. Kim, S. C. Suh, I. Kim, and K. J. Kim, “A survey of deep learning-based network anomaly detection,” Cluster Computing, vol. 22, pp. 949–961, 2019.", + "[24] A. Aldweesh, A. Derhab, and A. Z. Emam, \"Deep learning approaches for anomaly-based intrusion detection systems: A survey, taxonomy, and open issues,\" Knowl.-Based Syst., vol. 189, p. 105124, 2020." + ], + "bbox": [ + 513, + 70, + 921, + 944 + ], + "page_idx": 19 + }, + { + "type": "header", + "text": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. X, XXX 2025", + "bbox": [ + 76, + 29, + 426, + 41 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 906, + 30, + 919, + 40 + ], + "page_idx": 19 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[25] L. Li, J. Yan, H. Wang, and Y. Jin, \"Anomaly detection of time series with smoothness-inducing sequential variational auto-encoder,\" IEEE Trans. Neural Netw. Learn. Syst., vol. 32, no. 3, pp. 1177-1191, 2020.", + "[26] G. Harshvardhan, M. K. Gourisaria, M. Pandey, and S. S. Rautaray, \"A comprehensive survey and analysis of generative models in machine learning,\" Comput. Sci. Rev., vol. 38, p. 100285, 2020.", + "[27] B. Nachman and D. Shih, \"Anomaly detection with density estimation,\" Phys. Rev. D, vol. 101, no. 7, p. 075042, 2020.", + "[28] A. B. Nassif, M. A. Talib, Q. Nasir, and F. M. Dakalbab, \"Machine learning for anomaly detection: A systematic review,\" IEEE Access, vol. 9, pp. 78658-78700, 2021.", + "[29] X. Ma, J. Wu, S. Xue, J. Yang, C. Zhou, Q. Z. Sheng, H. Xiong, and L. Akoglu, “A comprehensive survey on graph anomaly detection with deep learning,” IEEE Trans. Knowl. Data Eng., vol. 35, no. 12, pp. 
12012–12038, 2021.", + "[30] X. Xia, X. Pan, N. Li, X. He, L. Ma, X. Zhang, and N. Ding, “Gan-based anomaly detection: A review,” Neurocomputing, vol. 493, pp. 497-535, 2022.", + "[31] J. Lv, Y. Wang, and S. Chen, \"Adaptive multivariate time-series anomaly detection,\" Inf. Process. Manag., vol. 60, no. 4, p. 103383, 2023.", + "[32] M. Y. I. Basheer, A. M. Ali, N. H. A. Hamid, M. A. M. Ariffin, R. Osman, S. Nordin, and X. Gu, \"Autonomous anomaly detection for streaming data,\" Knowledge-Based Systems, vol. 284, p. 111235, 2024.", + "[33] X. Peng, H. Li, F. Yuan, S. G. Razul, Z. Chen, and Z. Lin, \"An extreme learning machine for unsupervised online anomaly detection in multivariate time series,\" Neurocomputing, vol. 501, pp. 596-608, 2022.", + "[34] Y. Choi, H. Lim, H. Choi, and I.-J. Kim, \"Gan-based anomaly detection and localization of multivariate time series data for power plant,\" in Proc. 2020 IEEE Int. Conf. Big Data Smart Comput. (BigComp). IEEE, 2020, pp. 71-74.", + "[35] H.-T. Duong, V.-T. Le, and V. T. Hoang, \"Deep learning-based anomaly detection in video surveillance: a survey,\" Sensors, vol. 23, no. 11, p. 5024, 2023.", + "[36] S. Thudumu, P. Branch, J. Jin, and J. Singh, \"A comprehensive survey of anomaly detection techniques for high dimensional big data,\" Journal of Big Data, vol. 7, pp. 1-30, 2020.", + "[37] I. Souiden, M. N. Omri, and Z. Brahmi, “A survey of outlier detection in high dimensional data streams,” Comput. Sci. Rev., vol. 44, p. 100463, 2022.", + "[38] Q. Ding and E. D. Kolaczyk, “A compressed pca subspace method for anomaly detection in high-dimensional data,” IEEE Trans. Inf. Theory, vol. 59, no. 11, pp. 7419–7433, 2013.", + "[39] M. Sakurada and T. Yairi, \"Anomaly detection using autoencoders with nonlinear dimensionality reduction,\" in Proc. MLSDA 2014 2nd Workshop Mach. Learn. Sensory Data Anal., 2014, pp. 4-11.", + "[40] T. Cheng and B. Wang, \"Total variation and sparsity regularized decomposition model with union dictionary for hyperspectral anomaly detection,\" IEEE Trans. Geosci. Remote Sens., vol. 59, no. 2, pp. 1472-1486, 2020.", + "[41] L. Li, W. Li, Q. Du, and R. Tao, \"Low-rank and sparse decomposition with mixture of gaussian for hyperspectral anomaly detection,\" IEEE Trans. Cybern., vol. 51, no. 9, pp. 4363-4372, 2021.", + "[42] S. Han and S. S. Woo, “Learning sparse latent graph representations for anomaly detection in multivariate time series,” in Proc. 28th ACM SIGKDD Conf. Knowl. Discov. Data Min., 2022, pp. 2977–2986.", + "[43] X. Ma and W. Shi, “Aesmote: Adversarial reinforcement learning with smote for anomaly detection,” IEEE Trans. Netw. Sci. Eng., vol. 8, no. 2, pp. 943–956, 2021.", + "[44] M. Kim, E. Ou, P.-L. Loh, T. Allen, R. Agasie, and K. Liu, \"Rnn-based online anomaly detection in nuclear reactors for highly imbalanced datasets with uncertainty,\" Nucl. Eng. Des., vol. 364, p. 110699, 2020.", + "[45] G. Dlamini and M. Fahim, “Dgm: a data generative model to improve minority class presence in anomaly detection domain,” Neural Comput. Appl., vol. 33, pp. 13635–13646, 2021.", + "[46] S. Han, X. Hu, H. Huang, M. Jiang, and Y. Zhao, \"Adbench: Anomaly detection benchmark,\" Adv. Neural Inf. Process. Syst., vol. 35, pp. 32 142-32 159, 2022.", + "[47] Y. Zhang, Y. Chen, J. Wang, and Z. Pan, \"Unsupervised deep anomaly detection for multi-sensor time-series signals,\" IEEE Trans. Knowl. Data Eng., vol. 35, no. 2, pp. 2118-2132, 2023.", + "[48] D. Chen, L. Yue, X. Chang, M. Xu, and T. 
Jia, \"Nm-gan: Noise-modulated generative adversarial network for video anomaly detection,\" Pattern Recognition, vol. 116, p. 107969, 2021." + ], + "bbox": [ + 81, + 71, + 491, + 943 + ], + "page_idx": 20 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[49] M. U. Hassan, M. H. Rehmani, and J. Chen, \"Anomaly detection in blockchain networks: A comprehensive survey,\" IEEE Commun. Surv. Tutor., vol. 25, no. 1, pp. 289-318, 2022.", + "[50] Y. Liu, S. Garg, J. Nie, Y. Zhang, Z. Xiong, J. Kang, and M. S. Hossain, \"Deep anomaly detection for time-series data in industrial IoT: A communication-efficient on-device federated learning approach,\" IEEE Internet Things J., vol. 8, no. 8, pp. 6348-6358, 2020.", + "[51] M. J. Idrissi, H. Alami, A. El Mahdaouy, A. El Mekki, S. Oualil, Z. Yartaoui, and I. Berrada, “Fed-anids: Federated learning for anomaly-based network intrusion detection systems,” Expert Syst. Appl., vol. 234, p. 121000, 2023.", + "[52] L. Cui, Y. Qu, G. Xie, D. Zeng, R. Li, S. Shen, and S. Yu, \"Security and privacy-enhanced federated learning for anomaly detection in IoT infrastructures,\" IEEE Trans. Ind. Inform., vol. 18, no. 5, pp. 3492-3500, 2022.", + "[53] X. Wang, J. Liu, T. Qiu, C. Mu, C. Chen, and P. Zhou, \"A real-time collision prediction mechanism with deep learning for intelligent transportation system,\" IEEE Trans. Veh. Technol., vol. 69, no. 9, pp. 9497-9508, 2020.", + "[54] G. Li, T.-H. Nguyen, and J. J. Jung, \"Traffic incident detection based on dynamic graph embedding in vehicular edge computing,\" Appl. Sci., vol. 11, no. 13, p. 5861, 2021.", + "[55] G. Li and J. J. Jung, \"Deep learning for anomaly detection in multivariate time series: Approaches, applications, and challenges,\" Inf. Fusion, vol. 91, pp. 93-102, 2023.", + "[56] C. Zhao, X. Chang, T. Xie, H. Fujita, and J. Wu, \"Unsupervised anomaly detection based method of risk evaluation for road traffic accident,\" Appl. Intell., vol. 53, no. 1, pp. 369-384, 2023.", + "[57] S. Li, A. Pandey, B. Hooi, C. Faloutsos, and L. Pileggi, \"Dynamic graph-based anomaly detection in the electrical grid,\" IEEE Trans. Power Syst., vol. 37, no. 5, pp. 3408-3422, 2022.", + "[58] X. Wang and S.-H. Ahn, “Real-time prediction and anomaly detection of electrical load in a residential community,” Appl. Energy, vol. 259, p. 114145, 2020.", + "[59] I. Siniosoglou, P. Radoglou-Grammatikis, G. Efstathopoulos, P. Fouliras, and P. Sarigiannidis, “A unified deep learning anomaly detection and classification approach for smart grid environments,” IEEE Trans. Netw. Serv. Manag., vol. 18, no. 2, pp. 1137-1151, 2021.", + "[60] T. Fernando, H. Gammulle, S. Denman, S. Sridharan, and C. Fookes, \"Deep learning for medical anomaly detection-a survey,\" ACM Comput. Surv., vol. 54, no. 7, pp. 1-37, 2021.", + "[61] E. Šabić, D. Keeley, B. Henderson, and S. Nannemann, “Healthcare and anomaly detection: using machine learning to predict anomalies in heart rate data,” *Ai & Society*, vol. 36, no. 1, pp. 149–158, 2021.", + "[62] K. G. Al-Hashedi and P. Magalingam, “Financial fraud detection applying data mining techniques: A comprehensive review from 2009 to 2019,” Comput. Sci. Rev., vol. 40, p. 100402, 2021.", + "[63] W. Hilal, S. A. Gadsden, and J. Yawney, \"Financial fraud: a review of anomaly detection techniques and recent advances,\" Expert Syst. Appl., vol. 193, p. 116429, 2022.", + "[64] H. Fujita, A. Gaeta, V. Loia, and F. 
Orciuoli, “Resilience analysis of critical infrastructures: A cognitive approach based on granular computing,” IEEE Trans. Cybern., vol. 49, no. 5, pp. 1835–1848, 2019.", + "[65] V. K. Singh and M. Govindarasu, “A cyber-physical anomaly detection for wide-area protection using machine learning,” IEEE Trans. Smart Grid, vol. 12, no. 4, pp. 3514–3526, 2021.", + "[66] S. M. Nagarajan, G. G. Deverajan, A. K. Bashir, R. P. Mahapatra, and M. S. Al-Numay, \"TADF-cps: Intelligent anomaly detection framework towards cyber physical systems,\" Comput. Commun., vol. 188, pp. 81–89, 2022.", + "[67] T. Nakao, S. Hanaoka, Y. Nomura, M. Murata, T. Takenaga, S. Miki, T. Watadani, T. Yoshikawa, N. Hayashi, and O. Abe, \"Unsupervised deep anomaly detection in chest radiographs,\" J. Digit. Imaging, vol. 34, pp. 418-427, 2021.", + "[68] W. H. Pinaya, P.-D. Tudosiu, R. Gray, G. Rees, P. Nachev, S. Ourselin, and M. J. Cardoso, \"Unsupervised brain imaging 3d anomaly detection and segmentation with transformers,\" Med. Image Anal., vol. 79, p. 102475, 2022.", + "[69] L. Chen, Z. You, N. Zhang, J. Xi, and X. Le, “Utrad: Anomaly detection and localization with u-transformer,” Neural Networks, vol. 147, pp. 53–62, 2022.", + "[70] N. Shvetsova, B. Bakker, I. Fedulova, H. Schulz, and D. V. Dylov, \"Anomaly detection in medical imaging with deep perceptual autoencoders,\" IEEE Access, vol. 9, pp. 118571-118583, 2021.", + "[71] R. L. Draelos, D. Dov, M. A. Mazurowski, J. Y. Lo, R. Henao, G. D. Rubin, and L. Carin, \"Machine-learning-based multiple abnormality" + ], + "bbox": [ + 514, + 70, + 919, + 944 + ], + "page_idx": 20 + }, + { + "type": "header", + "text": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. X, XXX 2025", + "bbox": [ + 76, + 29, + 426, + 41 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "21", + "bbox": [ + 905, + 31, + 919, + 40 + ], + "page_idx": 20 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "prediction with large-scale chest computed tomography volumes,\" Med. Image Anal., vol. 67, p. 101857, 2021.", + "[72] N. Shvetsova, B. Bakker, I. Fedulova, H. Schulz, and D. V. Dylov, \"Anomaly detection in medical imaging with deep perceptual autoencoders,\" IEEE Access, vol. 9, pp. 118571-118583, 2021.", + "[73] H. Zhao, Y. Li, N. He, K. Ma, L. Fang, H. Li, and Y. Zheng, \"Anomaly detection for medical images using self-supervised and translation-consistent features,\" IEEE Trans. Med. Imaging, vol. 40, no. 12, pp. 3641-3651, 2021.", + "[74] R. Nayak, U. C. Pati, and S. K. Das, “A comprehensive review on deep learning-based methods for video anomaly detection,” Image Vis. Comput., vol. 106, p. 104078, 2021.", + "[75] Y. Wang, T. Liu, J. Zhou, and J. Guan, \"Video anomaly detection based on spatio-temporal relationships among objects,\" Neurocomputing, vol. 532, pp. 141-151, 2023.", + "[76] N. Li, F. Chang, and C. Liu, \"Spatial-temporal cascade autoencoder for video anomaly detection in crowded scenes,\" IEEE Trans. Multimed., vol. 23, pp. 203-215, 2020.", + "[77] D. Chen, P. Wang, L. Yue, Y. Zhang, and T. Jia, “Anomaly detection in surveillance video based on bidirectional prediction,” Image Vis. Comput., vol. 98, p. 103915, 2020.", + "[78] M. H. Bhuyan, D. K. Bhattacharyya, and J. K. Kalita, “Network anomaly detection: methods, systems and tools,” IEEE Commun. Surv. Tutor., vol. 16, no. 1, pp. 303-336, 2013.", + "[79] S. Liu, B. Zhou, Q. Ding, B. Hooi, Z. Zhang, H. Shen, and X. 
Cheng, \"Time series anomaly detection with adversarial reconstruction networks,\" IEEE Trans. Knowl. Data Eng., vol. 35, no. 4, pp. 4293-4306, 2022.", + "[80] H. Cao, C. Tan, Z. Gao, Y. Xu, G. Chen, P-A. Heng, and S. Z. Li, “A survey on generative diffusion models,” IEEE Transactions on Knowledge and Data Engineering, 2024.", + "[81] I. Goodfellow, J. Pouget-Abadie, M. Mirza, B. Xu, D. Warde-Farley, S. Ozair, A. Courville, and Y. Bengio, \"Generative adversarial networks,\" Commun. ACM, vol. 63, no. 11, pp. 139–144, 2020.", + "[82] L. Yang, Z. Zhang, Y. Song, S. Hong, R. Xu, Y. Zhao, W. Zhang, B. Cui, and M.-H. Yang, \"Diffusion models: A comprehensive survey of methods and applications,\" ACM Comput. Surv., vol. 56, no. 4, pp. 1-39, 2023.", + "[83] S. Bond-Taylor, A. Leach, Y. Long, and C. G. Willcocks, “Deep generative modelling: A comparative review of vaes, gans, normalizing flows, energy-based and autoregressive models,” IEEE Trans. Pattern Anal. Mach. Intell., vol. 44, no. 11, pp. 7327-7347, 2021.", + "[84] S. Sheynin, S. Benaim, and L. Wolf, “A hierarchical transformation-discriminating generative model for few shot anomaly detection,” in Proc. IEEE/CVF Int. Conf. Comput. Vis., 2021, pp. 8495-8504.", + "[85] W. Lim, K. Y. S. Chek, L. B. Theng, and C. T. C. Lin, “Future of generative adversarial networks (gan) for anomaly detection in network security: A review,” Comput. Secur., p. 103733, 2024.", + "[86] X. Du, J. Chen, J. Yu, S. Li, and Q. Tan, \"Generative adversarial nets for unsupervised outlier detection,\" Expert Syst. Appl., vol. 236, p. 121161, 2024.", + "[87] J. Wu, Z. Zhao, C. Sun, R. Yan, and X. Chen, “Fault-attention generative probabilistic adversarial autoencoder for machine anomaly detection,” IEEE Trans. Ind. Inf., vol. 16, no. 12, pp. 7479–7488, 2020.", + "[88] F. Dong, Y. Zhang, and X. Nie, \"Dual discriminator generative adversarial network for video anomaly detection,\" IEEE Access, vol. 8, pp. 88170-88176, 2020.", + "[89] D. Guo, Z. Liu, and R. Li, \"Regraphgan: A graph generative adversarial network model for dynamic network anomaly detection,\" Neural Networks, vol. 166, pp. 273-285, 2023.", + "[90] Y. Liu, Z. Li, C. Zhou, Y. Jiang, J. Sun, M. Wang, and X. He, \"Generative adversarial active learning for unsupervised outlier detection,\" IEEE Trans. Knowl. Data Eng., vol. 32, no. 8, pp. 1517-1528, 2019.", + "[91] C. Liu, Z. Kong, S. Babu, C. Joslin, and J. Ferguson, \"An integrated manifold learning approach for high-dimensional data feature extractions and its applications to online process monitoring of additive manufacturing,\" IISE Transactions, vol. 53, no. 11, pp. 1215-1230, 2021.", + "[92] J. Miao, H. Tao, H. Xie, J. Sun, and J. Cao, \"Reconstruction-based anomaly detection for multivariate time series using contrastive generative adversarial networks,\" Inf. Process. Manag., vol. 61, no. 1, p. 103569, 2024.", + "[93] R. Liu, W. Liu, Z. Zheng, L. Wang, L. Mao, Q. Qiu, and G. Ling, \"Anomaly-gan: A data augmentation method for train surface anomaly detection,\" Expert Syst. Appl., vol. 228, p. 120284, 2023." + ], + "bbox": [ + 83, + 71, + 491, + 943 + ], + "page_idx": 21 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[94] Y. Li, Z. Shi, C. Liu, W. Tian, Z. Kong, and C. B. Williams, \"Augmented time regularized generative adversarial network (atr-gan) for data augmentation in online process anomaly detection,\" IEEE Trans. Autom. Sci. Eng., vol. 19, no. 4, pp. 3338-3355, 2021.", + "[95] L. Zhang, W. Bai, X. Xie, L. Chen, and P. 
Dong, “Tmanomaly: Time-series mutual adversarial networks for industrial anomaly detection,” IEEE Trans. Ind. Inform., 2023.", + "[96] B. Du, X. Sun, J. Ye, K. Cheng, J. Wang, and L. Sun, \"Gan-based anomaly detection for multivariate time series using polluted training set,\" IEEE Trans. Knowl. Data Eng., vol. 35, no. 12, pp. 12 208-12 219, 2021.", + "[97] G. Fan, Y. Ma, X. Mei, F. Fan, J. Huang, and J. Ma, “Hyperspectral anomaly detection with robust graph autoencoders,” IEEE Trans. Geosci. Remote Sens., vol. 60, pp. 1-14, 2021.", + "[98] S. Wang, X. Wang, L. Zhang, and Y. Zhong, \"Auto-ad: Autonomous hyperspectral anomaly detection network based on fully convolutional autoencoder,\" IEEE Trans. Geosci. Remote Sens., vol. 60, pp. 1-14, 2021.", + "[99] H. Liu, X. Su, X. Shen, and X. Zhou, \"Msnet: Self-supervised multiscale network with enhanced separation training for hyperspectral anomaly detection,\" IEEE Trans. Geosci. Remote Sens., 2024.", + "[100] X. Lin, Z. Li, H. Fan, Y. Fu, and X. Chen, “Exploiting negative correlation for unsupervised anomaly detection in contaminated time series,” Expert Syst. Appl., p. 123535, 2024.", + "[101] C. Huang, Z. Yang, J. Wen, Y. Xu, Q. Jiang, J. Yang, and Y. Wang, \"Self-supervision-augmented deep autoencoder for unsupervised visual anomaly detection,\" IEEE Trans. Cybern., vol. 52, no. 12, pp. 13834-13847, 2021.", + "[102] C. Yin, S. Zhang, J. Wang, and N. N. Xiong, \"Anomaly detection based on convolutional recurrent autoencoder for IoT time series,\" IEEE Trans. Syst. Man Cybern.: Syst., vol. 52, no. 1, pp. 112-122, 2020.", + "[103] W. Zhang, C. Zhang, and F. Tsung, “Grelen: Multivariate time series anomaly detection from the perspective of graph relational learning,” in IJCAI, 2022, pp. 2390–2397.", + "[104] X. Zhou, Y. Hu, W. Liang, J. Ma, and Q. Jin, \"Variational lstm enhanced anomaly detection for industrial big data,\" IEEE Trans. Ind. Inform., vol. 17, no. 5, pp. 3469-3477, 2020.", + "[105] A. Makhzani, J. Shlens, N. Jaitly, I. Goodfellow, and B. Frey, \"Adversarial autoencoders,\" arXiv preprint arXiv:1511.05644, 2015.", + "[106] Q. Su, B. Tian, H. Wan, and J. Yin, \"Anomaly detection under contaminated data with contamination-immune bidirectional gans,\" IEEE Trans. Knowl. Data Eng., 2024.", + "[107] J. Yu, X. Gao, F. Zhai, B. Li, B. Xue, S. Fu, L. Chen, and Z. Meng, \"An adversarial contrastive autoencoder for robust multivariate time series anomaly detection,\" Expert Syst. Appl., vol. 245, p. 123010, 2024.", + "[108] J. Ho, A. Jain, and P. Abbeel, “Denoising diffusion probabilistic models,” Adv. Neural Inf. Process. Syst., vol. 33, pp. 6840–6851, 2020.", + "[109] J. Wolleb, F. Bieder, R. Sandkühler, and P. C. Cattin, \"Diffusion models for medical anomaly detection,\" in Int. Conf. Med. Image Comput. Comput.-Assist. Interv. (MICCAI). Springer, 2022, pp. 35-45.", + "[110] X. Zhang, N. Li, J. Li, T. Dai, Y. Jiang, and S.-T. Xia, \"Unsupervised surface anomaly detection with diffusion probabilistic model,\" in Proc. IEEE/CVF Int. Conf. Comput. Vis., 2023, pp. 6782-6791.", + "[111] S. Li, J. Yu, Y. Lu, G. Yang, X. Du, and S. Liu, \"Self-supervised enhanced denoising diffusion for anomaly detection,\" Inf. Sci., vol. 669, p. 120612, 2024.", + "[112] J. Zeng, X. Liu, and Z. Li, \"Radio anomaly detection based on improved denoising diffusion probabilistic models,\" IEEE Commun. Lett., 2023.", + "[113] X. Li, C. Xiao, Z. Feng, S. Pang, W. Tai, and F. 
Zhou, \"Controlled graph neural networks with denoising diffusion for anomaly detection,\" Expert Syst. Appl., vol. 237, p. 121533, 2024.", + "[114] C. Li, G. Feng, Y. Li, R. Liu, Q. Miao, and L. Chang, “Diffstad: Denoising diffusion probabilistic models for vehicle trajectory anomaly detection,” Knowledge-Based Systems, vol. 286, p. 111387, 2024.", + "[115] J. Pei, J. Wang, D. Shi, and P. Wang, \"Detection and imputation-based two-stage denoising diffusion power system measurement recovery under cyber-physical uncertainties,\" IEEE Trans. Smart Grid, vol. 15, no. 6, pp. 5965-5980, 2024.", + "[116] H. He, J. Zhang, H. Chen, X. Chen, Z. Li, X. Chen, Y. Wang, C. Wang, and L. Xie, \"A diffusion-based framework for multi-class anomaly detection,\" in Proc. AAAI Conf. Artif. Intell., vol. 38, no. 8, 2024, pp. 8472-8480.", + "[117] A. Sherstinsky, “Fundamentals of recurrent neural network (rnn) and long short-term memory (lstm) network,” Physica D: Nonlinear Phenomena, vol. 404, p. 132306, 2020." + ], + "bbox": [ + 506, + 70, + 919, + 943 + ], + "page_idx": 21 + }, + { + "type": "header", + "text": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. X, XXX 2025", + "bbox": [ + 76, + 29, + 426, + 41 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "22", + "bbox": [ + 905, + 30, + 919, + 40 + ], + "page_idx": 21 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[118] G. Van Houdt, C. Mosquera, and G. Nápoles, “A review on the long short-term memory model,” Artif. Intell. Rev., vol. 53, no. 8, pp. 5929–5955, 2020.", + "[119] B. Lindemann, B. Maschler, N. Sahlab, and M. Weyrich, “A survey on anomaly detection for technical systems using lstm networks,” Comput. Ind., vol. 131, p. 103498, 2021.", + "[120] R. Dey and F. M. Salem, “Gate-variants of gated recurrent unit (gru) neural networks,” in Proc. 2017 IEEE 60th Int. Midwest Symp. Circuits Syst. (MWSCAS). IEEE, 2017, pp. 1597–1600.", + "[121] Y. Wang, X. Du, Z. Lu, Q. Duan, and J. Wu, \"Improved lstm-based time-series anomaly detection in rail transit operation environments,\" IEEE Trans. Ind. Inform., vol. 18, no. 12, pp. 9027-9036, 2022.", + "[122] H. Chen, H. Liu, X. Chu, Q. Liu, and D. Xue, \"Anomaly detection and critical scada parameters identification for wind turbines based on lstm-ae neural network,\" Renew. Energy, vol. 172, pp. 829-840, 2021.", + "[123] P. Liu, X. Sun, Y. Han, Z. He, W. Zhang, and C. Wu, \"Arrhythmia classification of lstm autoencoder based on time series anomaly detection,\" Biomed. Signal Process. Control, vol. 71, p. 103228, 2022.", + "[124] Y. Yao, J. Ma, S. Feng, and Y. Ye, \"Svd-ae: An asymmetric autoencoder with svd regularization for multivariate time series anomaly detection,\" Neural Networks, vol. 170, pp. 535-547, 2024.", + "[125] S. Longari, D. H. N. Valcarcel, M. Zago, M. Carminati, and S. Zanero, \"Cannolo: An anomaly detection system based on lstm autoencoders for controller area network,\" IEEE Trans. Netw. Serv. Manag., vol. 18, no. 2, pp. 1913-1924, 2020.", + "[126] J. Pei, J. Wang, and D. Shi, \"Data-driven measurement tampering detection considering spatial-temporal correlations,\" in Proc. 2019 IEEE 3rd Conf. Energy Internet Energy Syst. Integr. (EI2), 2019, pp. 2641-2646.", + "[127] T. Lei, C. Gong, G. Chen, M. Ou, K. Yang, and J. Li, “A novel unsupervised framework for time series data anomaly detection via spectrum decomposition,” Knowledge-Based Systems, vol. 280, p. 111002, 2023.", + "[128] D. Hu, S. Wu, J. Wang, and D. 
Shi, \"Training a dynamic neural network to detect false data injection attacks under multiple unforeseen operating conditions,\" IEEE Trans. Smart Grid, 2023.", + "[129] C. Tang, L. Xu, B. Yang, Y. Tang, and D. Zhao, “Gru-based interpretable multivariate time series anomaly detection in industrial control system,” Comput. Secur., vol. 127, p. 103094, 2023.", + "[130] J. Yu, X. Gao, B. Li, F. Zhai, J. Lu, B. Xue, S. Fu, and C. Xiao, \"A filter-augmented auto-encoder with learnable normalization for robust multivariate time series anomaly detection,\" Neural Networks, vol. 170, pp. 478-493, 2024.", + "[131] A. Vaswani, \"Attention is all you need,\" Adv. Neural Inf. Process. Syst., 2017.", + "[132] H. Kang and P. Kang, \"Transformer-based multivariate time series anomaly detection using inter-variable attention mechanism,\" Knowledge-Based Systems, p. 111507, 2024.", + "[133] J. Kim, H. Kang, and P. Kang, “Time-series anomaly detection with stacked transformer representations and 1d convolutional network,” Eng. Appl. Artif. Intell., vol. 120, p. 105964, 2023.", + "[134] S. Tuli, G. Casale, and N. R. Jennings, “Tranad: Deep transformer networks for anomaly detection in multivariate time series data,” arXiv preprint arXiv:2201.07284, 2022.", + "[135] C. Wang and G. Liu, “From anomaly detection to classification with graph attention and transformer for multivariate time series,” Adv. Eng. Inform., vol. 60, p. 102357, 2024.", + "[136] J. Fan, Z. Wang, H. Wu, D. Sun, J. Wu, and X. Lu, \"An adversarial time-frequency reconstruction network for unsupervised anomaly detection,\" Neural Networks, vol. 168, pp. 44-56, 2023.", + "[137] Y. Shi, B. Wang, Y. Yu, X. Tang, C. Huang, and J. Dong, \"Robust anomaly detection for multivariate time series through temporal GCNs and attention-based vae,\" Knowledge-Based Systems, vol. 275, p. 110725, 2023.", + "[138] C. Ding, S. Sun, and J. Zhao, \"Mst-gat: A multimodal spatial-temporal graph attention network for time series anomaly detection,\" Inf. Fusion, vol. 89, pp. 527-536, 2023.", + "[139] W. Zhu, W. Li, E. R. Dorsey, and J. Luo, \"Unsupervised anomaly detection by densely contrastive learning for time series data,\" Neural Networks, vol. 168, pp. 450-458, 2023.", + "[140] H. Sun, M. Chen, J. Weng, Z. Liu, and G. Geng, \"Anomaly detection for in-vehicle network using cnn-lstm with attention mechanism,\" IEEE Trans. Veh. Technol., vol. 70, no. 10, pp. 10880-10893, 2021.", + "[141] T. Le, H. C. Vu, A. Ponchet-Durupt, N. Boudaoud, Z. Cherfi-Boulanger, and T. Nguyen-Trang, \"Unsupervised detecting anomalies in multivariate time series by robust convolutional LSTM encoder-decoder (rcled),\" Neurocomputing, vol. 592, p. 127791, 2024." + ], + "bbox": [ + 76, + 70, + 491, + 943 + ], + "page_idx": 22 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[142] M. Jin, H. Y. Koh, Q. Wen, D. Zambon, C. Alippi, G. I. Webb, I. King, and S. Pan, “A survey on graph neural networks for time series: Forecasting, classification, imputation, and anomaly detection,” IEEE Trans. Pattern Anal. Mach. Intell., 2024.", + "[143] Y. Wu, H.-N. Dai, and H. Tang, \"Graph neural networks for anomaly detection in industrial internet of things,\" IEEE Internet Things J., vol. 9, no. 12, pp. 9214-9231, 2022.", + "[144] H. Kim, B. S. Lee, W.-Y. Shin, and S. Lim, “Graph anomaly detection with graph neural networks: Current status and challenges,” IEEE Access, vol. 10, pp. 111820-111829, 2022.", + "[145] A. Deng and B. 
Hooi, “Graph neural network-based anomaly detection in multivariate time series,” in Proc. AAAI Conf. Artif. Intell. (AAAI), vol. 35, no. 5, 2021, pp. 4027–4035.", + "[146] Z. Chen, D. Chen, X. Zhang, Z. Yuan, and X. Cheng, “Learning graph structures with transformer for multivariate time-series anomaly detection in IoT,” IEEE Internet Things J., vol. 9, no. 12, pp. 9179–9189, 2021.", + "[147] Y. Zheng, H. Y. Koh, M. Jin, L. Chi, K. T. Phan, S. Pan, Y.-P. P. Chen, and W. Xiang, \"Correlation-aware spatial-temporal graph learning for multivariate time-series anomaly detection,\" IEEE Trans. Neural Netw. Learn. Syst., 2023.", + "[148] Y. Liu, Z. Li, S. Pan, C. Gong, C. Zhou, and G. Karypis, \"Anomaly detection on attributed networks via contrastive self-supervised learning,\" IEEE Trans. Neural Netw. Learn. Syst., vol. 33, no. 6, pp. 2378-2392, 2022.", + "[149] H. Zhao, Y. Wang, J. Duan, C. Huang, D. Cao, Y. Tong, B. Xu, J. Bai, J. Tong, and Q. Zhang, \"Multivariate time-series anomaly detection via graph attention network,\" in Proc. 2020 IEEE Int. Conf. Data Min. (ICDM). IEEE, 2020, pp. 841-850.", + "[150] W. Chen, L. Tian, B. Chen, L. Dai, Z. Duan, and M. Zhou, “Deep variational graph convolutional recurrent network for multivariate time series anomaly detection,” in Int. Conf. Mach. Learn. (ICML). PMLR, 2022, pp. 3621–3633.", + "[151] S. Han and S. S. Woo, \"Learning sparse latent graph representations for anomaly detection in multivariate time series,\" in Proc. 28th ACM SIGKDD Conf. Knowl. Discov. Data Min. (KDD), 2022, pp. 2977-2986.", + "[152] Y. Tang, L. Zhao, S. Zhang, C. Gong, G. Li, and J. Yang, \"Integrating prediction and reconstruction for anomaly detection,\" Pattern Recognit. Lett., vol. 129, pp. 123-130, 2020.", + "[153] M. Zheng, J. Man, D. Wang, Y. Chen, Q. Li, and Y. Liu, \"Semi-supervised multivariate time series anomaly detection for wind turbines using generator scada data,\" Reliab. Eng. Syst. Saf., vol. 235, p. 109235, 2023.", + "[154] Y. Wei, J. Jang-Jaccard, W. Xu, F. Sabrina, S. Camtepe, and M. Boulic, \"Lstm-autoencoder-based anomaly detection for indoor air quality time-series data,\" IEEE Sens. J., vol. 23, no. 4, pp. 3787-3800, 2023.", + "[155] G. Pu, L. Wang, J. Shen, and F. Dong, “A hybrid unsupervised clustering-based anomaly detection method,” Tsinghua Sci. Technol., vol. 26, no. 2, pp. 146–153, 2020.", + "[156] B. Liu, Y. Xiao, L. Cao, Z. Hao, and F. Deng, \"Svdd-based outlier detection on uncertain data,\" Knowl. Inf. Syst., vol. 34, pp. 597-618, 2013.", + "[157] A. P. Muniyandi, R. Rajeswari, and R. Rajaram, \"Network anomaly detection by cascading k-means clustering and C4.5 decision tree algorithm,\" Procedia Eng., vol. 30, pp. 174-182, 2012.", + "[158] A. M. Ikotun, A. E. Ezugwu, L. Abualigah, B. Abuhaija, and J. Heming, \"K-means clustering algorithms: A comprehensive review, variants analysis, and advances in the era of big data,\" Inf. Sci., vol. 622, pp. 178-210, 2023.", + "[159] H. V. Singh, A. Girdhar, and S. Dahiya, “A literature survey based on dbscan algorithms,” in Proc. 2022 6th Int. Conf. Intell. Comput. Control Syst. (ICICCS). IEEE, 2022, pp. 751-758.", + "[160] F. Murtagh and P. Contreras, “Algorithms for hierarchical clustering: an overview,” Wiley Interdiscip. Rev. Data Min. Knowl. Discov., vol. 2, no. 1, pp. 86–97, 2012.", + "[161] J. Li, H. Izakian, W. Pedrycz, and I. Jamal, \"Clustering-based anomaly detection in multivariate time series data,\" Appl. Soft Comput., vol. 100, p. 106919, 2021.", + "[162] A. Markovitz, G. 
Sharir, I. Friedman, L. Zelnik-Manor, and S. Avidan, \"Graph embedded pose clustering for anomaly detection,\" in Proc. IEEE/CVF Conf. Comput. Vis. Pattern Recognit., 2020, pp. 10539-10547.", + "[163] S. Qiu, J. Ye, J. Zhao, L. He, L. Liu, E. Bicong, and X. Huang, “Video anomaly detection guided by clustering learning,” Pattern Recognit., vol. 153, p. 110550, 2024." + ], + "bbox": [ + 506, + 70, + 919, + 943 + ], + "page_idx": 22 + }, + { + "type": "header", + "text": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. X, XXX 2025", + "bbox": [ + 76, + 29, + 426, + 41 + ], + "page_idx": 22 + }, + { + "type": "page_number", + "text": "23", + "bbox": [ + 905, + 30, + 919, + 40 + ], + "page_idx": 22 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[164] I. Kobyzev, S. J. Prince, and M. A. Brubaker, “Normalizing flows: An introduction and review of current methods,” IEEE Trans. Pattern Anal. Mach. Intell., vol. 43, no. 11, pp. 3964–3979, 2020.", + "[165] J. Yu, Y. Zheng, X. Wang, W. Li, Y. Wu, R. Zhao, and L. Wu, \"Fastflow: Unsupervised anomaly detection and localization via 2d normalizing flows,\" arXiv preprint arXiv:2111.07677, 2021.", + "[166] M. Cho, T. Kim, W. J. Kim, S. Cho, and S. Lee, \"Unsupervised video anomaly detection via normalizing flows with implicit latent features,\" Pattern Recognit., vol. 129, p. 108703, 2022.", + "[167] Q. Zhou, S. He, H. Liu, J. Chen, and W. Meng, \"Label-free multivariate time series anomaly detection,\" IEEE Trans. Knowl. Data Eng., 2024.", + "[168] E. Dai and J. Chen, \"Graph-augmented normalizing flows for anomaly detection of multiple time series,\" arXiv preprint arXiv:2202.07857, 2022.", + "[169] Y. Zhou, X. Liang, W. Zhang, L. Zhang, and X. Song, \"Vae-based deep svdd for anomaly detection,\" Neurocomputing, vol. 453, pp. 131-140, 2021.", + "[170] Z. Zhang and X. Deng, \"Anomaly detection using improved deep svdd model with data structure preservation,\" Pattern Recognit. Lett., vol. 148, pp. 1-6, 2021.", + "[171] J. Luo, J. Lin, Z. Yang, and H. Liu, \"Smd anomaly detection: A self-supervised texture-structure anomaly detection framework,\" IEEE Trans. Instrum. Meas., vol. 71, pp. 1-11, 2022.", + "[172] C.-L. Li, K. Sohn, J. Yoon, and T. Pfister, \"Cutpaste: Self-supervised learning for anomaly detection and localization,\" in Proc. IEEE/CVF Conf. Comput. Vis. Pattern Recognit., 2021, pp. 9664-9674.", + "[173] R. Liu, W. Liu, Z. Zheng, L. Wang, L. Mao, Q. Qiu, and G. Ling, \"Anomaly-gan: A data augmentation method for train surface anomaly detection,\" Expert Syst. Appl., vol. 228, p. 120284, 2023.", + "[174] Q. Wen, L. Sun, F. Yang, X. Song, J. Gao, X. Wang, and H. Xu, \"Time series data augmentation for deep learning: A survey,\" arXiv preprint arXiv:2002.12478, 2020.", + "[175] H. Hojjati, T. K. K. Ho, and N. Armanfard, \"Self-supervised anomaly detection in computer vision and beyond: A survey and outlook,\" Neural Networks, vol. 172, p. 106106, 2024.", + "[176] X. Zhang, M. Xu, and X. Zhou, “Realnet: A feature selection network with realistic synthetic anomaly for anomaly detection,” in Proc. IEEE/CVF Conf. Comput. Vis. Pattern Recognit., 2024, pp. 16699–16708.", + "[177] F. Van Wyk, Y. Wang, A. Khojandi, and N. Masoud, “Real-time sensor anomaly detection and identification in automated vehicles,” IEEE Trans. Intell. Transp. Syst., vol. 21, no. 3, pp. 1264–1276, 2019.", + "[178] M. Abouof, R. Mizouni, S. Singh, H. Otrok, and E. 
Damiani, \"Self-supervised online and lightweight anomaly and event detection for IoT devices,\" IEEE Internet Things J, vol. 9, no. 24, pp. 25 285-25 299, 2022.", + "[179] X. Zhou, J. Wu, W. Liang, I. Kevin, K. Wang, Z. Yan, L. T. Yang, and Q. Jin, \"Reconstructed graph neural network with knowledge distillation for lightweight anomaly detection,\" IEEE Trans. Neural Netw. Learn. Syst., 2024.", + "[180] Y. Zhao, G. H. Chen, and Z. Jia, “Tod: GPU-accelerated outlier detection via tensor operations,” arXiv preprint arXiv:2110.14007, 2021.", + "[181] A. Al-Mazrawe and B. Al-Musawi, “Anomaly detection in cloud network: A review,” in BIO Web of Conferences, vol. 97. EDP Sciences, 2024, p. 00019.", + "[182] Z. Niu, G. Zhong, and H. Yu, “A review on the attention mechanism of deep learning,” Neurocomputing, vol. 452, pp. 48–62, 2021.", + "[183] H. Liu, X. Huang, M. Jia, T. Jia, J. Han, Y. Li, and Z. Wu, \"Uac-ad: Unsupervised adversarial contrastive learning for anomaly detection on multi-modal data in microservice systems,\" IEEE Trans. Serv. Comput., 2024." + ], + "bbox": [ + 76, + 71, + 491, + 773 + ], + "page_idx": 23 + }, + { + "type": "header", + "text": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. X, XXX 2025", + "bbox": [ + 76, + 29, + 426, + 41 + ], + "page_idx": 23 + }, + { + "type": "page_number", + "text": "24", + "bbox": [ + 906, + 31, + 919, + 40 + ], + "page_idx": 23 + } +] \ No newline at end of file diff --git a/data/2025/2503_13xxx/2503.13195/86c7a688-40c1-4477-ae2d-59118e3bfc36_model.json b/data/2025/2503_13xxx/2503.13195/86c7a688-40c1-4477-ae2d-59118e3bfc36_model.json new file mode 100644 index 0000000000000000000000000000000000000000..e32206a829c6586c126fb93dcf93c06a631e7bf3 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13195/86c7a688-40c1-4477-ae2d-59118e3bfc36_model.json @@ -0,0 +1,5132 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.077, + 0.03, + 0.428, + 0.041 + ], + "angle": 0, + "content": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. X, XXX 2025" + }, + { + "type": "page_number", + "bbox": [ + 0.912, + 0.031, + 0.921, + 0.041 + ], + "angle": 0, + "content": "1" + }, + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.261, + 0.058, + 0.707 + ], + "angle": 270, + "content": "arXiv:2503.13195v1 [cs.LG] 17 Mar 2025" + }, + { + "type": "title", + "bbox": [ + 0.155, + 0.071, + 0.843, + 0.141 + ], + "angle": 0, + "content": "Deep Learning Advancements in Anomaly Detection: A Comprehensive Survey" + }, + { + "type": "text", + "bbox": [ + 0.093, + 0.148, + 0.905, + 0.182 + ], + "angle": 0, + "content": "Haoqi Huang, Ping Wang\\(\\text{©}\\), Fellow, IEEE, Jianhua Pei\\(\\text{©}\\), Graduate Student Member, IEEE, Jiacheng Wang\\(\\text{©}\\), Shahren Alexanian, and Dusit Niyato\\(\\text{©}\\), Fellow, IEEE" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.236, + 0.493, + 0.514 + ], + "angle": 0, + "content": "Abstract—The rapid expansion of data from diverse sources has made anomaly detection (AD) increasingly essential for identifying unexpected observations that may signal system failures, security breaches, or fraud. As datasets become more complex and high-dimensional, traditional detection methods struggle to effectively capture intricate patterns. Advances in deep learning have made AD methods more powerful and adaptable, improving their ability to handle high-dimensional and unstructured data. This survey provides a comprehensive review of over 180 recent studies, focusing on deep learning-based AD techniques. 
We categorize and analyze these methods into reconstruction-based and prediction-based approaches, highlighting their effectiveness in modeling complex data distributions. Additionally, we explore the integration of traditional and deep learning methods, highlighting how hybrid approaches combine the interpretability of traditional techniques with the flexibility of deep learning to enhance detection accuracy and model transparency. Finally, we identify open issues and propose future research directions to advance the field of AD. This review bridges gaps in existing literature and serves as a valuable resource for researchers and practitioners seeking to enhance AD techniques using deep learning." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.521, + 0.492, + 0.56 + ], + "angle": 0, + "content": "Index Terms—Anomaly detection, deep learning, data reconstruction and prediction, Internet of things, comprehensive survey." + }, + { + "type": "title", + "bbox": [ + 0.217, + 0.584, + 0.352, + 0.598 + ], + "angle": 0, + "content": "I. INTRODUCTION" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.605, + 0.491, + 0.756 + ], + "angle": 0, + "content": "An anomaly refers to an observation that significantly deviates from the expected behavior in a system, often appearing unusual, inconsistent, or unexpected [1]. Despite the fact that outliers typically constitute only a small fraction of a dataset, they are often highly crucial because they carry important information and can reveal critical insights during analysis. Consequently, anomaly detection (AD) is the process of identifying such anomalous observations using various methods and algorithms, which aids decision-makers in better understanding data patterns and behaviors." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.757, + 0.492, + 0.817 + ], + "angle": 0, + "content": "The rapid development of the Internet of Things (IoT) has revolutionized the way data is generated, collected, and analyzed across various domains. IoT systems leverage a wide array of interconnected sensors and devices to collect massive" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.83, + 0.493, + 0.945 + ], + "angle": 0, + "content": "H. Huang, P. Wang and S. Alexanian are with the Lassonde School of Engineering, York University, Toronto, ON M3J 1P3, Canada (e-mail:joycehhq@yorku.ca; pingw@yorku.ca; yu263319@my.yorku.ca). J. Pei is with the State Key Laboratory of Advanced Electromagnetic Technology, School of Electrical and Electronic Engineering, Huazhong University of Science and Technology, Wuhan 430074, China (e-mail: jianhuapei@hust.edu.cn). J. Wang and D. Niyato are with the School of Computer Science and Engineering, Nanyang Technological University, Singapore (e-mail: jcwang_cq@foxmail.com; dniyato@ntu.edu.sg)." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.235, + 0.923, + 0.386 + ], + "angle": 0, + "content": "amounts of real-time data in diverse applications, including smart cities [2], industrial automation [3], healthcare [4], and transportation [5], etc. This proliferation of sensor data introduces unprecedented opportunities for enhancing operational efficiency and decision-making processes. However, it also presents significant challenges, as the data is often high-dimensional, noisy, and prone to anomalies caused by faulty sensors, environmental changes, or malicious attacks [6]. Detecting anomalies in data is critical for ensuring system reliability, security, and performance [7]." 
+ }, + { + "type": "text", + "bbox": [ + 0.503, + 0.387, + 0.923, + 0.929 + ], + "angle": 0, + "content": "AD methodologies can be systematically classified according to various criteria. One prominent classification framework differentiates these methods into supervised, semi-supervised, and unsupervised approaches, predicated on the availability and nature of labeled data [8]. Supervised learning-based AD algorithms necessitate a fully labeled dataset, where each data point is explicitly annotated as either normal or anomalous. This labeling process facilitates the model's ability to discern and learn the underlying characteristics that differentiate anomalous instances from normal ones, thereby enhancing its detection accuracy. Semi-supervised learning-based methods, on the other hand, operate with a dataset comprising a substantial volume of unlabeled data alongside a smaller subset of labeled instances. These labels may include both normal and anomalous data, or in certain cases, solely normal instances [9]. In scenarios where only normal data is labeled, the semi-supervised approach converges towards unsupervised methodologies, as the model predominantly learns normal behavior patterns and identifies anomalies as deviations from these learned patterns. Unsupervised learning-based AD methods eschew the need for labeled data entirely, leveraging the intrinsic structural properties of the dataset to autonomously identify anomalies [10] [11]. In practical applications, a significant portion of contemporary AD research gravitates towards unsupervised methods [12]. This preference is largely driven by the substantial imbalance between the number of normal instances and anomalies, which complicates the acquisition of a sufficiently large labeled dataset required for effective supervised learning [13]. Moreover, anomalies are frequently correlated with critical failures or hazardous events, rendering the labeling process both costly and logistically challenging. Another key classification criterion is the nature of the dataset, particularly whether it comprises time-series data, which distinguishes AD methods into time-series [14] and non-temporal approaches. The applications of time-series and non-temporal AD will be discussed in detail in Section III." + }, + { + "type": "text", + "bbox": [ + 0.52, + 0.93, + 0.922, + 0.946 + ], + "angle": 0, + "content": "In addition to the temporal aspect, AD techniques can also" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.077, + 0.03, + 0.428, + 0.042 + ], + "angle": 0, + "content": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. X, XXX 2025" + }, + { + "type": "page_number", + "bbox": [ + 0.912, + 0.031, + 0.921, + 0.041 + ], + "angle": 0, + "content": "2" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.07, + 0.493, + 0.449 + ], + "angle": 0, + "content": "be categorized based on their underlying paradigms: traditional methods and deep learning-based methods. Traditional techniques encompass statistical approaches [15], distance-based methods [16], and clustering algorithms [17]. These approaches generally rely on estimating the probability distribution of normal data to predict anomalies. However, since the early 20th century, the fields of data science, machine learning, deep learning, and artificial intelligence have witnessed exponential growth, with significant implications for AD [18]. Particularly in recent years, the advent of soft-computing techniques has significantly influenced the development of deep learning-based methods. 
These techniques are characterized by their ability to handle imprecise, uncertain, and nonlinear data, making them highly suitable for applications involving deep learning. Consequently, deep learning-based methods have been propelled to the forefront due to their superior capability to learn expressive representations of complex data, including high-dimensional, temporal, spatial, and graph-structured data [19]. By proficiently modeling intricate patterns and relationships inherent in the data, deep learning approaches have proven remarkably effective in identifying anomalies across a wide range of challenging and complex datasets. This paper concentrates specifically on AD methods based on deep learning models, with the objective of providing a comprehensive review of this rapidly evolving field." + }, + { + "type": "title", + "bbox": [ + 0.074, + 0.459, + 0.49, + 0.488 + ], + "angle": 0, + "content": "A. Contrasting Traditional Models with Deep Learning Models" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.493, + 0.493, + 0.916 + ], + "angle": 0, + "content": "Traditional AD methods [20], such as statistical techniques, clustering algorithms [21], and Principal Component Analysis (PCA) [22], have long been established as reliable tools across a wide spectrum of applications due to their simplicity, interpretability, and low computational overhead. These characteristics make them particularly promising in scenarios where model transparency and efficiency are paramount. Statistical techniques, for example, provide clear, rule-based mechanisms for detecting anomalies, while clustering algorithms are effective in grouping similar data points and isolating outliers in relatively low-dimensional datasets. Similarly, PCA has been widely adopted for dimensionality reduction, enabling effective AD by isolating principal components that capture major variations in the data [17]. Despite these advantages, traditional methods often encounter significant limitations when applied to modern, complex datasets. Statistical techniques generally assume that data adheres to specific distributions. However, this assumption is rarely met in real-world scenarios, where data often exhibits non-Gaussian distributions and heavy tails. Clustering-based methods, while useful in many contexts, often struggle to accurately define clusters, particularly when anomalies do not present clear separability from normal data. PCA, on the other hand, relies heavily on the assumption of linearity and extensive feature engineering, making it less effective at capturing the nuanced, non-linear patterns prevalent in high-dimensional datasets [22]. These constraints have prompted a shift towards more advanced approaches capable of handling the increasing complexity of modern data." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.915, + 0.492, + 0.946 + ], + "angle": 0, + "content": "In contrast, deep learning models have recently emerged as a powerful alternative, addressing many of the shortcom" + }, + { + "type": "image", + "bbox": [ + 0.513, + 0.068, + 0.911, + 0.691 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.505, + 0.702, + 0.704, + 0.716 + ], + "angle": 0, + "content": "Fig. 1. The anatomy of this survey." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.748, + 0.922, + 0.946 + ], + "angle": 0, + "content": "ings inherent in traditional approaches. 
Deep neural networks (DNNs) possess the capacity to autonomously learn complex patterns and hierarchical representations from raw data, thereby obviating the need for labor-intensive feature engineering [23]. This characteristic is particularly advantageous in the detection of subtle and multifaceted anomalies that might elude traditional methods [24]. By leveraging their multilayered architectures, deep learning models excel in processing high-dimensional and unstructured data, such as images, videos, and text, which are often challenging for conventional methods to handle effectively [25]. These models are adept at capturing non-linear relationships and interactions within the data, offering a more flexible and robust framework for AD" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.077, + 0.03, + 0.428, + 0.042 + ], + "angle": 0, + "content": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. X, XXX 2025" + }, + { + "type": "page_number", + "bbox": [ + 0.912, + 0.031, + 0.921, + 0.041 + ], + "angle": 0, + "content": "3" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.07, + 0.492, + 0.114 + ], + "angle": 0, + "content": "[26]. Consequently, there has been a significant shift away from purely traditional AD techniques towards the adoption of deep learning methodologies." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.115, + 0.493, + 0.281 + ], + "angle": 0, + "content": "Nonetheless, it is crucial to acknowledge that traditional AD models retain certain advantages, notably in their simplicity, interpretability, and lower computational overheads [27]. These characteristics make them particularly appealing in scenarios where model transparency and computational efficiency are crucial. In recognition of these strengths, Section V of this paper will introduce and discuss various existing approaches that integrate traditional methods with deep learning techniques. These hybrid methods aim to leverage the strengths of both paradigms, resulting in more robust and efficient AD systems." + }, + { + "type": "title", + "bbox": [ + 0.075, + 0.299, + 0.336, + 0.314 + ], + "angle": 0, + "content": "B. Comparison With Existing Surveys" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.317, + 0.495, + 0.695 + ], + "angle": 0, + "content": "In recent years, the field of AD has seen a surge in research, particularly with the advent of deep learning methods. Numerous surveys have been published, each attempting to provide a comprehensive overview of the field. However, many of these surveys focus on broader historical developments or cover deep learning techniques only up to a certain point in time. For example, surveys such as [19], [28], [29], and [23] primarily cover techniques developed up to 2020. While these surveys are valuable, they do not reflect the most recent advancements in the field. Furthermore, specific models such as Generative Adversarial Network (GAN)-based AD have been explored in-depth by studies [30], [31], [32], [33], and [34]. However, these studies primarily address foundational approaches and lack coverage of advanced techniques like conditional GANs, cycle-consistent GANs, and GANs integrated with self-supervised learning. Emerging hybrid models, combining GANs with Variational Autoencoders (VAEs) or autoencoders for improved robustness, are also underrepresented. In contrast, our survey covers the literature from 2019 to 2024, providing a timely and comprehensive overview of the latest advancements. 
By focusing on recent trends and evolving techniques, including enhanced architectures and hybrid frameworks, our work offers a more current perspective, bridging existing gaps and guiding future research directions in AD." + }, + { + "type": "title", + "bbox": [ + 0.076, + 0.713, + 0.291, + 0.727 + ], + "angle": 0, + "content": "C. Contributions and Structure" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.732, + 0.491, + 0.852 + ], + "angle": 0, + "content": "This survey systematically reviews over 180 recent research papers on AD, including publications from leading journals (IEEE, ACM, Springer, Elsevier) and top-tier conferences (AAAI, CCS, ICCV) spanning from 2019 to 2024. By focusing on cutting-edge advancements in deep learning-based methods, this survey ensures a comprehensive and up-to-date overview of the field. The contributions of this survey are summarized as follows:" + }, + { + "type": "text", + "bbox": [ + 0.093, + 0.855, + 0.493, + 0.947 + ], + "angle": 0, + "content": "- This survey addresses gaps in prior surveys by highlighting advanced techniques that were previously underexplored, including conditional GANs, cycle-consistent GANs, and hybrid frameworks combining GANs with VAEs. These models are introduced and analyzed to demonstrate their strengths and weaknesses." + }, + { + "type": "text", + "bbox": [ + 0.523, + 0.07, + 0.923, + 0.159 + ], + "angle": 0, + "content": "- This survey provides a detailed comparison of reconstruction-based and prediction-based methods. To enhance clarity and usability, we summarize key strengths, weaknesses, and applications in structured tables, offering readers insights into the trade-offs of different models." + }, + { + "type": "text", + "bbox": [ + 0.523, + 0.16, + 0.923, + 0.248 + ], + "angle": 0, + "content": "- Recognizing the strengths of traditional methods, this survey explores their integration with deep learning models. Hybrid approaches, such as clustering, normalizing flows, and support vector data descriptions combined with deep learning, are analyzed to address complex challenges in AD." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.255, + 0.925, + 0.468 + ], + "angle": 0, + "content": "The organization of this survey is shown in Fig. 1. Section II provides an overview of data characteristics and anomaly types, followed by a discussion of common data processing challenges and mitigation strategies critical to effective AD. Section III explores the related applications of AD. Section IV categorizes and analyzes deep learning methods for AD, highlighting their effectiveness and limitations. Section V discusses the integration of traditional methods with deep learning, including clustering methods, normalizing flows, and support vector data descriptions. Section VI highlights open issues and future directions, such as challenges in data collection, computational complexity, explainability, and handling diverse anomaly types. Finally, Section VII concludes the survey with a summary and potential directions for future research." + }, + { + "type": "title", + "bbox": [ + 0.543, + 0.491, + 0.882, + 0.505 + ], + "angle": 0, + "content": "II. DATA CHARACTERISTICS AND CHALLENGES" + }, + { + "type": "title", + "bbox": [ + 0.504, + 0.516, + 0.829, + 0.532 + ], + "angle": 0, + "content": "A. 
Overview of Input Data and Anomaly Types" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.538, + 0.925, + 0.947 + ], + "angle": 0, + "content": "In AD, input data presents unique challenges due to its structure, dimensionality, and temporal nature. Different types of data require specialized techniques to effectively identify anomalies, and the nature of anomalies themselves can vary greatly depending on the domain and data format [28]. For instance, visual data such as images and videos may exhibit anomalies associated with spatial or temporal inconsistencies, while time series data often involves anomalies related to trends or sudden changes in values over time. To better understand these variations, we first categorize data into textual, audio, image, and video formats, highlighting their respective characteristics and the challenges they pose for AD. Beyond this classification, data can also be viewed through the lens of temporal dependencies, distinguishing between time-series data, which captures sequential patterns over time, and nontemporal data, where observations are independent of temporal order. This dual perspective provides a comprehensive framework for analyzing how different types of anomalies manifest across various data formats. Furthermore, the nature of anomalies themselves can vary depending on the data format. Point anomalies, sequence anomalies, and outliers may all manifest differently across different data types and structures. Understanding these distinctions is essential for selecting the appropriate AD techniques [29], as a deep understanding of data characteristics and anomaly types ensures that detection methods are effectively tailored to capture the specific behaviors and patterns indicative of anomalies." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.077, + 0.03, + 0.428, + 0.042 + ], + "angle": 0, + "content": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. X, XXX 2025" + }, + { + "type": "page_number", + "bbox": [ + 0.912, + 0.032, + 0.921, + 0.041 + ], + "angle": 0, + "content": "4" + }, + { + "type": "title", + "bbox": [ + 0.093, + 0.07, + 0.317, + 0.085 + ], + "angle": 0, + "content": "1) Categorization by Data Type:" + }, + { + "type": "text", + "bbox": [ + 0.093, + 0.09, + 0.493, + 0.226 + ], + "angle": 0, + "content": "- Textual Data: Textual data consists of sequences of discrete symbols, such as characters, words, or phrases, structured in a linear format. Unlike other data types, textual data conveys information through syntactic and semantic relationships. It can be found in various forms, including documents, chat messages, emails, and system logs. Anomalies in textual data may appear as irregular word sequences, syntactic inconsistencies, missing or misplaced words, or semantically incoherent phrases." + }, + { + "type": "text", + "bbox": [ + 0.093, + 0.226, + 0.493, + 0.392 + ], + "angle": 0, + "content": "- Audio Data: Audio data captures variations in amplitude and frequency over time, representing spoken language, environmental sounds, or machine signals. It can be stored as waveforms or transformed into frequency-domain representations like spectrograms. Unlike textual data, audio data is continuous and often requires spectral analysis to extract meaningful patterns. Anomalies in audio data manifest as unexpected distortions, unusual frequency shifts, missing segments, or abnormal sound patterns caused by malfunctioning equipment, altered speech, or environmental noise." 
+ }, + { + "type": "text", + "bbox": [ + 0.093, + 0.392, + 0.493, + 0.544 + ], + "angle": 0, + "content": "- Image Data: Image data consists of two-dimensional pixel grids, where each pixel represents intensity or color information. Unlike sequential data, image data encodes spatial relationships, capturing textures, shapes, and patterns. Image anomalies often appear as distortions, irregular textures, missing components, or unexpected objects that deviate from normal patterns. For instance, these can result from manufacturing defects, medical imaging errors, or environmental changes in satellite imagery." + }, + { + "type": "text", + "bbox": [ + 0.093, + 0.543, + 0.493, + 0.709 + ], + "angle": 0, + "content": "- Video Data: Video data extends image data by incorporating a temporal dimension, forming sequences of frames over time. Each frame within a video is an image, and the relationships between frames capture motion and dynamic interactions [35]. Unlike static images, video data requires modeling temporal dependencies, making AD more complex. Anomalies in video data include irregular movements, unexpected scene transitions, or unusual object behaviors, which are commonly observed in surveillance footage, traffic monitoring, and activity recognition." + }, + { + "type": "text", + "bbox": [ + 0.093, + 0.709, + 0.493, + 0.86 + ], + "angle": 0, + "content": "- Tabular Data: Tabular data consists of structured records organized in rows and columns, where each row represents an entity or event, and each column corresponds to an attribute. This type of data is widely used in databases, spreadsheets, financial records, and sensor logs. Unlike the other data types, tabular data can contain numerical, categorical, or mixed-format information. Anomalies in tabular data include missing values, unexpected categorical labels, numerical outliers, or inconsistent relationships between attributes." + }, + { + "type": "title", + "bbox": [ + 0.093, + 0.865, + 0.416, + 0.88 + ], + "angle": 0, + "content": "2) Categorization by Temporal Characteristics:" + }, + { + "type": "text", + "bbox": [ + 0.093, + 0.885, + 0.493, + 0.947 + ], + "angle": 0, + "content": "- Time-based data: Time-based data can be represented as a sequence of observations recorded over time, and it may consist of either a single variable (univariate) or multiple variables (multivariate). We can generalize the" + }, + { + "type": "text", + "bbox": [ + 0.537, + 0.07, + 0.923, + 0.493 + ], + "angle": 0, + "content": "representation of both univariate and multivariate time series using the following formula: \\( X = \\{x_{t,j}\\}_{t\\in T,j\\in J} \\), where \\( t\\in T \\) denotes the time index, with \\( t \\) representing a specific time step and \\( T \\) being the set of all time steps in the dataset. Similarly, \\( j\\in J \\) represents the dimension or variable index, where \\( j \\) refers to a particular variable and \\( J \\) is the set of all variables or dimensions in the data. When \\( |J| = 1 \\), the series is univariate, meaning there is only one variable observed over time. In contrast, when \\( |J| > 1 \\), the series is multivariate, indicating that multiple variables are recorded simultaneously at each time step. Each observation \\( x_{t,j} \\) corresponds to the value of the \\( j \\)-th variable at time \\( t \\). Among the five previously introduced data types, audio, video, and certain types of textual and tabular data are inherently time-based. 
Audio data is naturally sequential, with sound signals evolving over time, making anomalies such as distortions or frequency shifts dependent on temporal patterns. Video data extends image sequences over time, requiring the detection of abnormal object movements, scene transitions, or motion inconsistencies. Textual data, such as streaming logs, system event records, or chat conversations, also exhibits temporal dependencies, where anomalies may appear as unexpected event sequences or irregular timing between log entries. Similarly, tabular data in the form of financial transactions, sensor readings, or stock prices follows a time-series format, where anomalies may indicate fraud, equipment failure, or unusual market behaviors." + }, + { + "type": "text", + "bbox": [ + 0.523, + 0.493, + 0.923, + 0.84 + ], + "angle": 0, + "content": "- Non-temporal data: Non-temporal data refers to observations that lack a temporal sequence, where the relationships between data points are independent of time. Such data is prevalent across industries that rely on static datasets or event-based observations. AD in non-temporal data focuses on identifying irregularities by analyzing data characteristics, patterns, or statistical properties rather than temporal dependencies. This process is crucial for uncovering hidden risks, fraudulent activities, or system malfunctions in contexts where time is not a defining factor. Among the five data types, image and certain types of tabular data are the most common forms of non-temporal data. Image data, such as medical scans, industrial defect detection images, or satellite photos, captures spatial relationships but does not depend on a temporal sequence. Anomalies in such data typically appear as unusual textures, distortions, or unexpected objects. Tabular data, when not used for time-series analysis, is also non-temporal, such as customer records, product attributes, or static financial datasets. In these cases, AD focuses on identifying outliers, inconsistencies, or unusual relationships between different features rather than changes over time." + }, + { + "type": "title", + "bbox": [ + 0.522, + 0.847, + 0.685, + 0.862 + ], + "angle": 0, + "content": "3) Types of Anomalies:" + }, + { + "type": "text", + "bbox": [ + 0.522, + 0.87, + 0.922, + 0.93 + ], + "angle": 0, + "content": "- Point Anomalies: A single data point deviates significantly from the expected behavior in the dataset. These are common across both time-based and non-time-based data, representing sudden outliers or unusual values." + }, + { + "type": "text", + "bbox": [ + 0.522, + 0.93, + 0.922, + 0.945 + ], + "angle": 0, + "content": "- Contextual Anomalies: A data point is considered" + }, + { + "type": "list", + "bbox": [ + 0.522, + 0.87, + 0.922, + 0.945 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.077, + 0.03, + 0.428, + 0.042 + ], + "angle": 0, + "content": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. X, XXX 2025" + }, + { + "type": "page_number", + "bbox": [ + 0.912, + 0.031, + 0.921, + 0.041 + ], + "angle": 0, + "content": "5" + }, + { + "type": "text", + "bbox": [ + 0.107, + 0.07, + 0.493, + 0.145 + ], + "angle": 0, + "content": "anomalous only when it is analyzed within a specific context or surrounding data. In time-based data, this could involve seasonal trends or time-of-day variations, whereas in non-time-based data, it could depend on relationships between variables." 
+ }, + { + "type": "text", + "bbox": [ + 0.094, + 0.146, + 0.493, + 0.221 + ], + "angle": 0, + "content": "- Subsequence Anomalies: A contiguous sequence of data points behaves abnormally, typically found in time series data. These anomalies are significant when the temporal order of data points plays a key role in detecting deviations from expected patterns." + }, + { + "type": "text", + "bbox": [ + 0.094, + 0.221, + 0.493, + 0.311 + ], + "angle": 0, + "content": "- Cluster-based and Correlation Anomalies: Anomalies that occur when a group of data points, or relationships between variables, deviate from expected patterns. This is more prominent in non-time-based data, where detecting irregular clusters or correlations between features is essential for AD." + }, + { + "type": "list", + "bbox": [ + 0.094, + 0.146, + 0.493, + 0.311 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.076, + 0.335, + 0.214, + 0.349 + ], + "angle": 0, + "content": "B. Data Processing" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.355, + 0.493, + 0.581 + ], + "angle": 0, + "content": "Effective AD requires careful preparation and preprocessing of input data to ensure that detection algorithms can operate effectively. In many cases, raw data contains inherent challenges that can significantly hinder the performance of AD models. These challenges arise from the complexity of real-world data, including high dimensionality, missing or sparse values, skewed class distributions, and noise that can obscure true anomalies. Without addressing these issues, AD methods may struggle to accurately identify rare or subtle deviations in the data, leading to false positives, missed anomalies, or inefficient computations. Therefore, appropriate data preprocessing steps are crucial for improving detection accuracy, robustness, and overall system reliability. This subsection outlines some of the most common data processing issues and their implications for AD, along with strategies to mitigate these challenges." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.582, + 0.493, + 0.868 + ], + "angle": 0, + "content": "1) Dimensionality: High-dimensional data makes AD more complex due to the \"curse of dimensionality\". As datasets expand in size and complexity—particularly with the rise of \"big data\", characterized by large-scale, high-velocity data generated from diverse sources, it becomes increasingly difficult for AD methods to maintain accuracy [36]. To address this issue, dimensionality reduction is a common approach that transforms a large set of input features into a smaller, more focused feature set [37]. While traditional methods such as PCA [38] are frequently used, they may struggle to capture nonlinear relationships in complex data. For instance, Sakurada et al. [39] compare autoencoders, which perform non-linear dimensionality reduction, with linear PCA and kernel PCA on both synthetic and real-world datasets. The study reveals that on the nonlinear and high-dimensional synthetic Lorenz dataset, AE achieved a relative AUC improvement of \\(26.83\\%\\) compared to linear PCA. This highlights that autoencoders can even detect anomalies in data with relatively high intrinsic dimensionality, where linear PCA struggles to perform." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.87, + 0.493, + 0.947 + ], + "angle": 0, + "content": "2) Sparsity: Sparse data, where many values are missing or incomplete, poses significant challenges for AD. 
Sparse datasets can lead to reduced detection accuracy, as missing or incomplete data points may obscure the underlying patterns necessary for detecting anomalies [36]. Cheng et al." + }, + { + "type": "list", + "bbox": [ + 0.074, + 0.582, + 0.493, + 0.947 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.069, + 0.922, + 0.295 + ], + "angle": 0, + "content": "[40] highlight that in high-dimensional settings, the sparsity problem is further amplified as the data becomes more spread out, increasing the risk of missing critical information that signals anomalies. To address these challenges, Li et al. [41] propose an improved low-rank and sparse decomposition model (LSDM) for hyperspectral AD. Their approach models sparse components as a Mixture of Gaussians (MoG), effectively capturing anomalous patterns within complex datasets by leveraging the low-rank structure. In contrast, Han et al. [42] take a different approach by introducing sparse autoencoders to learn sparse latent representations from high-dimensional input data. Through experiments on three real-world cyber-physical system datasets, the study shows that mining sparse latent patterns from high-dimensional time series can significantly improve the robustness of AD models." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.296, + 0.922, + 0.568 + ], + "angle": 0, + "content": "3) Class Imbalance: In most AD tasks, the occurrence of anomalies is significantly rarer than normal data points, resulting in a class imbalance problem. This imbalance can cause detection algorithms to be overly biased toward the majority class (normal data), leading to a higher rate of false negatives where critical anomalies are missed. In imbalanced datasets, it is often possible to achieve an overall high accuracy, while the recall score for the minority class (anomalies) remains very low [43]. Traditional methods to mitigate this issue involve oversampling the minority class or undersampling the majority class [44]. Recent research has increasingly focused on introducing Data Generation Models (DGM) to improve the representation of the minority class in AD. For instance, Dlamini et al. [45] use Conditional Generative Adversarial Networks (CGANs) to generate synthetic samples for the minority class and combine this with KL divergence to guide the model in accurately learning the distribution of the minority class." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.568, + 0.922, + 0.854 + ], + "angle": 0, + "content": "4) Noise in Data: Noise refers to random or irrelevant information present in the data, which can obscure true anomalies and lead to false positives. In addition, during the training process of AD models, the high complexity of the model and the presence of noisy data can lead to overfitting, where the model inadvertently learns to fit the reconstruction error from noisy inputs rather than focusing on genuine anomalies [46]. To reduce the impact of noisy data, Zhang et al. [47] incorporate a Maximum Mean Discrepancy (MMD) term to encourage the distribution of low-dimensional representations to approximate a target distribution. The goal is to align the distribution of noisy data with that of normal training data, thereby reducing the risk of overfitting. Furthermore, Chen et al. [48] propose a novel method called Noise Modulated Adversarial Learning, where noise images from a predefined normal distribution are fed into the discriminator network as negative samples. 
This adversarial process modulates the training of the reconstruction network, balancing the learning between the two networks to improve robustness against noise." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.855, + 0.922, + 0.945 + ], + "angle": 0, + "content": "5) Privacy of data: In many fields, such as healthcare, finance, and cybersecurity, data used for AD often contains sensitive or personal information. Ensuring the privacy and security of this data is paramount, as improper handling could lead to serious legal and ethical violations. Hassan et al. [49] conduct an in-depth investigation into the privacy of AD" + }, + { + "type": "list", + "bbox": [ + 0.504, + 0.296, + 0.922, + 0.945 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.077, + 0.03, + 0.428, + 0.042 + ], + "angle": 0, + "content": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. X, XXX 2025" + }, + { + "type": "page_number", + "bbox": [ + 0.912, + 0.032, + 0.921, + 0.041 + ], + "angle": 0, + "content": "6" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.07, + 0.493, + 0.298 + ], + "angle": 0, + "content": "models in blockchain technology. To address these privacy concerns, Federated Learning (FL), a distributed machine learning paradigm, has emerged as a promising complement to AD [50]. FL allows distributed clients to collaboratively train a shared model while protecting the privacy of their local data. For example, Idrissi et al. [51] propose Fed-ANIDS, which leverages FL to address the privacy issues associated with centralized Network Intrusion Detection Systems (NIDS). This model was applied to various settings and popular datasets, demonstrating its ability to achieve high performance while preserving the privacy of distributed client data. Cui et al. [52] further introduce GANs into FL and design a new algorithm that injects controllable noise into local model parameters, ensuring both AD utility and compliance with differential privacy requirements." + }, + { + "type": "title", + "bbox": [ + 0.179, + 0.314, + 0.388, + 0.327 + ], + "angle": 0, + "content": "III. RELATED APPLICATIONS" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.332, + 0.493, + 0.651 + ], + "angle": 0, + "content": "With the rapid advancement of deep learning models, AD has become more efficient and adaptable. These sophisticated models have been widely applied across various domains, enhancing the ability to identify irregular patterns in complex and high-dimensional datasets. In the previous section, we categorized data based on temporal characteristics into time-series and non-time-series data. However, visual data presents unique challenges, detection requirements, and a wide range of applications, making it difficult to classify strictly as either time-series or non-time-series data. It can be static (e.g., images) or dynamic (e.g., videos), where images are typically considered non-time-series data, while videos fall under time-series data. Visual data is extensively used in fields such as medical imaging, autonomous systems, and surveillance, where detecting anomalies requires specialized deep learning techniques that differ from traditional numerical or categorical data analysis. To better reflect its broad applications and distinct computational needs, we discuss visual data separately. Based on this classification, we will now explore the applications of deep learning in AD from three perspectives: time-series data, non-temporal data, and visual data."
+ }, + { + "type": "title", + "bbox": [ + 0.075, + 0.67, + 0.327, + 0.684 + ], + "angle": 0, + "content": "A. Applications in Time Series Data" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.688, + 0.491, + 0.868 + ], + "angle": 0, + "content": "Time series data, defined by its sequential nature over time, is fundamental to many systems where the temporal order of events critically influences analysis and decision-making processes. AD in time series data has become an indispensable technique across various industries, enabling the early detection of irregular patterns that may indicate underlying issues or emerging threats. The applications of time series AD are extensive, impacting critical areas such as traffic monitoring, power system management, and healthcare. In the following sections, we present how these applications leverage AD to enhance operational efficiency, ensure system reliability, and improve safety across these fields." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.87, + 0.492, + 0.947 + ], + "angle": 0, + "content": "1) Traffic Monitoring: Time series AD plays a pivotal role in modern traffic management systems. As demonstrated in [53], real-time data from loop detection sensors are integrated and analyzed to predict traffic volume and enhance system safety. The ability to detect anomalies in traffic patterns is" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.07, + 0.923, + 0.312 + ], + "angle": 0, + "content": "essential for anticipating and responding to potential incidents before they escalate. For instance, Li et al. [54] present a method that identifies traffic incidents by detecting anomalies in traffic time series data, thereby helping users avoid accidents and reduce travel time. Furthermore, high-speed driving is identified as a significant contributor to traffic accidents [55]. By monitoring and analyzing sudden increases in vehicle speed, AD techniques can predict and prevent accidents more effectively, providing a critical tool for improving road safety. Zhao et al. [56] further validate the efficacy of unsupervised AD methods in assessing elevated road traffic accident risks, specifically by analyzing volume and speed data from traffic on Yan'an elevated road. This approach enhances the ability to detect and respond to hazardous traffic conditions in real-time, underscoring the indispensable role of AD in traffic management." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.327, + 0.923, + 0.658 + ], + "angle": 0, + "content": "2) Power System: AD is a vital element in ensuring the stability, security, and reliability of electrical grids. By continuously monitoring grid data, these techniques can swiftly identify deviations from normal operational patterns, which may indicate issues such as natural faults or malicious cyberattacks. The ability to detect these anomalies in real-time is crucial for preventing potential outages and maintaining a consistent power supply. For instance, Li et al. [57] highlight that accurate and real-time AD can enhance grid stability by over \\(20\\%\\), providing rapid response capabilities that significantly bolster the system's defense against both natural disruptions and cyber threats. Furthermore, the introduction of a residential electrical load AD framework, as demonstrated in [58], has been shown to significantly improve both load prediction accuracy and AD, thereby optimizing demand-side management (DSM) in residential areas. 
In terms of cybersecurity, the MENSA Intrusion Detection System (IDS) [59] has proven to be a formidable tool in smart grid environments, effectively detecting operational anomalies and classifying a wide range of cyberattacks. This capability not only protects critical infrastructure but also underscores the indispensable role of AD in modern power system management." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.673, + 0.923, + 0.945 + ], + "angle": 0, + "content": "3) Healthcare: AD plays a crucial role in healthcare by enabling continuous monitoring of patient vital signs, such as heart rate and blood pressure, to swiftly identify abnormal conditions that may require urgent medical intervention. The application of AD in medical signal analysis is particularly important, as highlighted in [60], where the identification of data samples that deviate from the typical data distribution can reveal underlying issues such as noise, changes in a patient's condition, or the emergence of new and previously undetected medical conditions. This capability is essential for ensuring accurate diagnosis and timely patient care. Furthermore, Keeley et al. [61] demonstrate that AD algorithms can effectively identify irregularities in heart rate data, which not only facilitates faster emergency responses but also provides deeper insights into a patient's health status. This, in turn, enhances overall patient care while also reducing the cognitive load on healthcare professionals by automating the detection of potential issues." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.077, + 0.03, + 0.428, + 0.042 + ], + "angle": 0, + "content": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. X, XXX 2025" + }, + { + "type": "page_number", + "bbox": [ + 0.912, + 0.031, + 0.921, + 0.041 + ], + "angle": 0, + "content": "7" + }, + { + "type": "title", + "bbox": [ + 0.076, + 0.07, + 0.341, + 0.085 + ], + "angle": 0, + "content": "B. Applications in Non-temporal Data" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.092, + 0.493, + 0.243 + ], + "angle": 0, + "content": "AD in non-temporal data plays a critical role in ensuring operational integrity, security, and financial stability. By focusing on identifying irregularities within independent events or static datasets, it addresses potential risks such as fraud, system failures, and malicious activities. Unlike time-series applications, non-temporal AD leverages data patterns and statistical analysis to uncover deviations that signal anomalies. In the following, we present specific applications across domains such as finance and cybersecurity, showcasing its significant impact on safeguarding critical systems and operations." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.244, + 0.493, + 0.47 + ], + "angle": 0, + "content": "1) Finance: In the financial sector, non-temporal data AD is pivotal for identifying fraudulent transactions, credit scoring anomalies, and unusual trading activities. Unlike time series data, these financial fraud detection tasks often involve independent events, such as individual transactions or credit score evaluations, which do not rely on temporal sequences. Instead, the focus is on transaction characteristics and patterns that may indicate fraudulent behavior. Various data mining techniques, including SVM, Naïve Bayes, and Random Forest, are extensively employed to detect different forms of financial fraud, such as bank fraud, insurance fraud, financial statement fraud, and cryptocurrency fraud [62]. 
As highlighted by [63], AD is critical in quickly identifying activities that deviate from normal patterns, thereby enabling rapid intervention to minimize financial losses." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.472, + 0.495, + 0.835 + ], + "angle": 0, + "content": "2) Cybersecurity: AD is a fundamental component of maintaining a secure and resilient cyberspace. As [64] points out, advanced security controls and resilience analysis are crucial during the early stages of system deployment to ensure long-term sustainability. AD plays a pivotal role in this process by identifying unauthorized access, malicious activities, and network intrusions that deviate from established norms. This capability is essential for safeguarding network security and preventing potential breaches. Early research in deep learning-based network intrusion detection focused on architectures such as Autoencoders (AE), Deep Belief Networks (DBN), and Recurrent Neural Networks (RNN) [24]. As deep learning technology has advanced, more sophisticated models have been developed for detecting anomalies in cybersecurity. For instance, Singh et al. [65] illustrate the benefits of AD in wide-area protection schemes (WAPS) by using a deep learning-based cyber-physical AD system (CPADS) to detect and mitigate data integrity and communication failure attacks in centralized Remedial Action Schemes (CRAS). Similarly, Nagarajan et al. [66] highlight the effectiveness of AD in enhancing the security of Cyber-Physical Systems (CPSs) by accurately identifying anomalous behaviors, thereby addressing the growing challenges posed by sophisticated cyberattacks and the increasing volume of data." + }, + { + "type": "title", + "bbox": [ + 0.076, + 0.863, + 0.288, + 0.879 + ], + "angle": 0, + "content": "C. Applications in Visual Data" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.885, + 0.492, + 0.947 + ], + "angle": 0, + "content": "AD in visual data, encompassing images and videos, plays a vital role in numerous industries where visual inspection is critical. Applications range from detecting defects in manufacturing processes to identifying medical abnormalities in" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.07, + 0.923, + 0.189 + ], + "angle": 0, + "content": "imaging, monitoring public safety through surveillance systems, and ensuring quality control in production lines. By leveraging advanced deep learning techniques, AD methods can automatically identify and analyze irregularities with high precision, reducing reliance on manual inspection and improving efficiency. In this section, we explore key applications of visual data-based AD, highlighting its transformative impact across various domains." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.19, + 0.923, + 0.583 + ], + "angle": 0, + "content": "1) Medical Imaging: AD in medical imaging is indispensable across numerous medical specialties, playing a crucial role in the early detection and diagnosis of diseases. In radiology, it is employed to identify anomalies in X-rays [67], brain imaging [68], and CT scans [69], thereby aiding in the accurate diagnosis of various conditions. However, as [70] highlights, anomalies in medical images often closely resemble normal tissue, posing a significant challenge to detection due to their subtle differences. This similarity requires the use of sophisticated techniques to effectively distinguish between normal and anomalous data. For example, Draelos et al. 
[71] demonstrate the power of machine learning in radiology, significantly enhancing the classification performance for multiple abnormalities in chest CT volumes, achieving an AUROC greater than 0.90 for 18 different abnormalities. Additionally, Shvetsova et al. [72] showcase a novel method for AD in medical images, which dramatically improves the detection of subtle abnormalities in complex, high-resolution images, such as chest X-rays and pathology slides—scenarios where traditional models often fail. Furthermore, Zhao et al. [73] introduce the SALAD framework, which enhances AD in medical images by utilizing self-supervised and translation-consistent features from normal data. This approach is particularly effective in situations where labeled anomalous images are scarce, thereby improving detection accuracy in challenging medical imaging tasks." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.583, + 0.923, + 0.947 + ], + "angle": 0, + "content": "2) Video Monitoring: Video AD (VAD) has become increasingly crucial with the rise of large-scale multimedia data analysis, particularly in the processing of video data [74]. VAD focuses on identifying unusual patterns or behaviors in video footage that deviate from the norm, making it a vital tool in several domains. In security and surveillance, VAD is used to monitor public spaces, buildings, and secure areas, enabling the detection of suspicious activities, unauthorized access, and unusual crowd behaviors, thereby enhancing public safety [75]. In the realm of traffic monitoring, VAD facilitates the real-time identification of accidents and irregular traffic patterns, allowing for prompt response and management [76]. Additionally, VAD is applied in behavioral analysis to detect abnormal behaviors in various environments, such as schools, workplaces, and public transportation systems, contributing to the maintenance of safety and order. For example, Chen et al. [77] propose a bidirectional prediction framework specifically designed for AD in surveillance videos. This innovative approach employs forward and backward prediction subnetworks to predict the same target frame, constructing a loss function based on the real target frame and its bidirectional predictions. Experimental results demonstrate that this model outperforms existing approaches on various surveillance video datasets, including those featuring pedestrians and street scenes, showcasing" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.077, + 0.03, + 0.428, + 0.042 + ], + "angle": 0, + "content": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. X, XXX 2025" + }, + { + "type": "page_number", + "bbox": [ + 0.912, + 0.031, + 0.921, + 0.041 + ], + "angle": 0, + "content": "8" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.07, + 0.493, + 0.099 + ], + "angle": 0, + "content": "its superior performance in accurately detecting anomalies in real-world surveillance scenarios." + }, + { + "type": "title", + "bbox": [ + 0.117, + 0.117, + 0.45, + 0.145 + ], + "angle": 0, + "content": "IV. DEEP LEARNING METHODS FOR ANOMALY DETECTION" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.151, + 0.492, + 0.287 + ], + "angle": 0, + "content": "The application of deep learning to AD has revolutionized the way we identify irregularities in both time-based and non-time-based datasets [78]. Traditional methods, such as statistical analysis and clustering, have been commonly used to detect anomalies. 
However, these methods often struggle with high-dimensional data, complex relationships, and capturing intricate patterns. Deep learning models, with their ability to learn hierarchical representations and detect subtle anomalies, have emerged as powerful tools to overcome these limitations." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.287, + 0.492, + 0.378 + ], + "angle": 0, + "content": "As shown in Fig.2, this section introduces three major deep learning approaches applied to AD: reconstruction-based methods, prediction-based methods, and hybrid approaches. Each approach leverages the strengths of deep learning in distinct ways to improve AD accuracy, particularly in scenarios where data patterns are complex, unstructured, or temporal." + }, + { + "type": "title", + "bbox": [ + 0.074, + 0.398, + 0.493, + 0.426 + ], + "angle": 0, + "content": "A. Deep learning methods for Anomaly Detection based on Reconstruction" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.432, + 0.493, + 0.717 + ], + "angle": 0, + "content": "Reconstruction-based approaches operate by training a model to learn the underlying distribution of normal data [79]. Once trained, the model attempts to reconstruct incoming data. The reconstruction error, which is the difference between the original data and its reconstruction, is then used as an indicator of anomaly. A high reconstruction error suggests that the data is anomalous, as it deviates from the learned normal patterns. Deep learning-based reconstructive models have become prominent due to their ability to capture complex patterns in high-dimensional data. In recent years, most reconstruction-based AD models have been developed using techniques such as GAN, AE, and diffusion models. These models each have unique strengths and weaknesses, as summarized in Table I. This table consolidates insights from multiple studies, including [80], [81], [82], and [83], which have analyzed the advantages and limitations of GANs, VAEs, and Diffusion Models in AD. In this section, we introduce these three types of models in the context of AD and discuss their various variants." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.719, + 0.493, + 0.947 + ], + "angle": 0, + "content": "1) GAN-based Anomaly Detection: GANs are powerful tools for generating synthetic data that resembles a given training dataset [84]. As shown in the upper part of Fig.3, GANs consist of two main components: a generator and a discriminator, both of which are neural networks. Because of this structure, GAN models are highly flexible, allowing for different networks to be chosen as the generator and discriminator based on the specific task. This flexibility makes GANs a versatile framework for a wide range of applications. The generator \\(G\\) takes a random noise vector \\(z\\) (usually sampled from a Gaussian distribution) as input and generates synthetic data \\(G(z)\\). 
The discriminator \(D\) receives a data sample (either from the real dataset or from the generator) as input and outputs a probability \(D(x)\), representing the likelihood that the input is real (i.e., from the actual dataset) rather than fake (i.e.," + }, + { + "type": "image", + "bbox": [ + 0.534, + 0.067, + 0.887, + 0.131 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.533, + 0.144, + 0.887, + 0.211 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.533, + 0.213, + 0.887, + 0.319 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.504, + 0.331, + 0.922, + 0.356 + ], + "angle": 0, + "content": "Fig. 2. Three types of anomaly detection: (a) Reconstruction-based approach, (b) Prediction-based approach, (c) Hybrid approach." + }, + { + "type": "image", + "bbox": [ + 0.533, + 0.375, + 0.889, + 0.589 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.504, + 0.601, + 0.84, + 0.614 + ], + "angle": 0, + "content": "Fig. 3. Structural Frameworks for GAN Anomaly Detection." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.643, + 0.922, + 0.795 + ], + "angle": 0, + "content": "generated by the generator). The generator and discriminator are trained simultaneously through a process where the generator tries to produce data that can fool the discriminator, and the discriminator tries to improve its ability to distinguish between real and fake data. Table II provides a comprehensive summary of recent GAN-based AD models, categorizing them based on their techniques, approaches, strengths, and weaknesses. This table highlights how different GAN variants are tailored for specific AD tasks, along with the types of data they are applied to and their publication years." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.796, + 0.922, + 0.827 + ], + "angle": 0, + "content": "The training process of GANs can be described as a minimax game with the following objective function:" + }, + { + "type": "equation", + "bbox": [ + 0.523, + 0.85, + 0.921, + 0.893 + ], + "angle": 0, + "content": "\[\n\min _ {G} \max _ {D} V (D, G) = \mathbb {E} _ {x \sim p _ {data} (x)} [ \log D (x) ] + \mathbb {E} _ {z \sim p _ {z} (z)} [ \log (1 - D (G (z))) ]. \tag {1}\n\]" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.899, + 0.922, + 0.947 + ], + "angle": 0, + "content": "In this function, \( p_{data}(x) \) represents the distribution of the real data, \( p_z(z) \) represents the distribution of the noise vector \( z \), \( G(z) \) is the data generated by the generator, and \( D(x) \) is the" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.077, + 0.03, + 0.428, + 0.042 + ], + "angle": 0, + "content": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. X, XXX 2025" + }, + { + "type": "page_number", + "bbox": [ + 0.912, + 0.031, + 0.921, + 0.041 + ], + "angle": 0, + "content": "9" + }, + { + "type": "table_caption", + "bbox": [ + 0.262, + 0.072, + 0.735, + 0.094 + ], + "angle": 0, + "content": "TABLE I COMPARISON OF GANS, VAES, AND DIFFUSION MODELS IN ANOMALY DETECTION" + }, + { + "type": "table", + "bbox": [ + 0.096, + 0.105, + 0.903, + 0.212 + ], + "angle": 0, + "content": "
<table><tr><td>Model</td><td>Strengths</td><td>Weaknesses</td></tr>
<tr><td>GANs</td><td>• Capable of generating high-fidelity, realistic samples.\n• Learns complex data distributions using adversarial loss.\n• Useful in AD by distinguishing real vs. generated data.</td><td>• Prone to mode collapse, leading to low sample diversity.\n• Hard to train with difficult-to-interpret losses.\n• Training is unstable and hard to converge.</td></tr>
<tr><td>VAEs</td><td>• Easy to train with one tractable likelihood loss.\n• Provides high sample diversity by covering all data modes.\n• Latent space representation is useful for AD tasks.</td><td>• Produces low-fidelity, often blurry samples.\n• Pixel-based loss leads to sample ambiguity and blurriness.</td></tr>
<tr><td>Diffusion Models</td><td>• Generates high-fidelity samples with gradual refinement.\n• High sample diversity due to likelihood maximization.\n• Intermediate noisy images serve as useful latent codes for AD.</td><td>• Slow sample generation due to the multi-step denoising process.\n• Computationally intensive, requiring many steps for both forward and reverse diffusion.</td></tr></table>
" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.237, + 0.492, + 0.327 + ], + "angle": 0, + "content": "probability that \\( x \\) is real. The generator \\( G \\) aims to minimize this objective, while the discriminator \\( D \\) aims to maximize it. The discriminator updates its weights to maximize the probability of correctly classifying real and generated data, while the generator updates its weights to minimize the discriminator's ability to distinguish between real and fake data." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.327, + 0.493, + 0.644 + ], + "angle": 0, + "content": "In the context of AD, GANs play crucial roles in both representation learning and data augmentation, each serving distinct purposes within deep Learning [85]. In representation learning, the primary objective of GANs is to learn and model the underlying distribution of the data, enabling the generation of synthetic data that closely resembles real data. This process involves a generator that creates fake data from random noise and a discriminator that distinguishes between real and fake data. Through iterative training, the generator improves its ability to produce realistic data, which is particularly useful in tasks like AD. For example, in [86], GANs are used for representation learning by generating fake data that matches the distribution of normal data. This generated data is then used to train a VAE to detect anomalies through reconstruction errors. Similarly, in [87], a fault-attention generative probabilistic adversarial autoencoder (FGPAA) is proposed, combining GANs and autoencoders for AD by learning the low-dimensional manifold of healthy state data. The GAN component aids in feature representation learning, reducing signal information loss and enhancing the model's ability to detect anomalies through distribution probability and reconstruction error." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.644, + 0.493, + 0.914 + ], + "angle": 0, + "content": "There are two main structures to using GANs for AD, as shown in Fig.3. The first approach is based on the generator, as depicted in the lower part of Fig.3, highlighted by the yellow box. The basic idea is to train the GAN on normal data and then use the reconstruction error to identify anomalies. During the training phase, the GAN is trained exclusively on normal data, allowing the generator to learn to produce data that closely mimics the normal data distribution. During the detection phase, a test data point \\( x \\) is fed into the generator to obtain the reconstructed data \\( G(x) \\). The reconstruction error, typically measured as the difference between the original data point \\( x \\) and the reconstructed data \\( G(x) \\), is then used to detect anomalies. This can be quantified using metrics such as mean squared error (MSE). If the reconstruction error exceeds a predefined threshold, the data point is classified as an anomaly. The intuition behind this approach is that the generator, trained solely on normal data, will struggle to accurately reconstruct anomalous data, resulting in a high reconstruction error." 
+ }, + { + "type": "text", + "bbox": [ + 0.075, + 0.915, + 0.492, + 0.946 + ], + "angle": 0, + "content": "The mathematical representation for AD using GANs involves computing the reconstruction error \\( E(x) \\) as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.637, + 0.251, + 0.921, + 0.268 + ], + "angle": 0, + "content": "\\[\nE (x) = \\| x - G (x) \\| ^ {2}, \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.275, + 0.922, + 0.518 + ], + "angle": 0, + "content": "where \\(\\| \\cdot \\| ^2\\) denotes the squared Euclidean distance. A threshold \\(\\tau\\) is set, and if \\(E(x) > \\tau\\), the data point \\(x\\) is considered an anomaly. For example, Dong et al. [88] propose a semi-supervised approach for video AD using a dual discriminator-based GAN structure, focusing on representation learning. In this approach, the generator predicts future frames for normal events, and anomalies are detected by evaluating the quality of these predictions. Similarly, Guo et al. [89] introduce RegraphGAN, a graph generative adversarial network specifically designed for dynamic graph AD. RegraphGAN utilizes GAN-based representation learning to encode complex spatiotemporal relationships in graph data, allowing it to better capture anomalies. By leveraging encoders to project input samples into a latent space and integrating GANs to enhance both training stability and efficiency, RegraphGAN significantly improves AD performance over existing methods." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.52, + 0.922, + 0.776 + ], + "angle": 0, + "content": "The second approach leverages the discriminator highlighted by the green box in Fig.3. A well-trained discriminator has the ability to differentiate between real (normal) and fake (anomalous) samples. During the detection phase, test samples are directly input to the discriminator, which evaluates the likelihood that a given sample is real. If the discriminator assigns a low probability to a sample, suggesting that it is likely fake or anomalous, the sample is flagged as an anomaly. This method relies on the discriminator's capacity to recognize deviations from the normal data distribution it learned during training. For instance, Liu et al. [90] propose a GAN framework that uses multiple generators to produce potential outliers, which are then distinguished from normal data by a discriminator to detect anomalies. The discriminator's output score is used to evaluate the anomaly degree of input data, providing a comprehensive reference distribution and preventing mode collapse." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.779, + 0.922, + 0.946 + ], + "angle": 0, + "content": "Additionally, GANs are highly effective in data augmentation, helping to mitigate the scarcity of anomaly samples, which often results in data imbalance and poor generalization [91]. When anomaly samples are unevenly distributed or lacking in diversity, models struggle to learn rare anomalies and can overfit to the training set, reducing their accuracy on unseen data. Traditional data augmentation techniques—such as scaling, rotation, random cropping, translation, flipping, and copy-paste—attempt to mitigate these issues. However, simple linear transformations fail to capture new distributions and features of unknown anomalies, such as random changes in" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.077, + 0.03, + 0.428, + 0.042 + ], + "angle": 0, + "content": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. 
X, XXX 2025" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.031, + 0.921, + 0.041 + ], + "angle": 0, + "content": "10" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.07, + 0.493, + 0.417 + ], + "angle": 0, + "content": "shape or texture. This is where GANs provide a significant advantage. By generating synthetic anomaly data that mimics the distribution of real-world anomalies, GANs enable models to learn a more diverse set of anomaly features. This not only addresses the imbalance problem but also improves the model's generalization capabilities, as it learns to detect anomalies based on a broader range of characteristics beyond those present in the original training dataset. Miao et al. [92] introduce an unsupervised AD framework that uses data augmentation through contrastive learning and GANs to mitigate overfitting. By employing a geometric distribution mask, it enhances data diversity and generates synthetic anomaly samples, addressing the scarcity of anomaly data. In [93], Anomaly-GAN addresses data augmentation by using a mask pool, anomaly-aware loss, and local-global discriminators to generate high-quality, realistic synthetic anomalies with diverse shapes, angles, spatial locations, and quantities in a controllable manner. Li et al. [94] propose an augmented time-regularized generative adversarial network that combines an augmented filter layer and a novel temporal distance metric to generate high-quality and diverse artificial data, addressing the limitations of existing GAN approaches in handling limited training data and temporal order." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.417, + 0.493, + 0.688 + ], + "angle": 0, + "content": "2) AE-based Anomaly Detection: In recent years, the limitations of traditional AE models in handling complex and noisy data have become more apparent, leading to the development of enhanced methods to improve their performance in AD tasks. For example, Fan et al. [97] introduce a new framework by incorporating the \(\ell_{2,1}\)-norm into the AE, and experiments have demonstrated that this framework can significantly improve AD accuracy by increasing the model's robustness to noise and outliers during training. Wang et al. [98] demonstrate that introducing an adaptive-weighted loss function can effectively suppress anomaly reconstruction, thereby improving the accuracy of AD. Liu et al. [99] introduce a multi-scale convolutional AE architecture, where multiple stacked convolutional encoder-decoder layers act as background learners to robustly eliminate anomalies of varying sizes during background reconstruction. Additionally, Lin et al. [100] introduce a soft calibration strategy combined with AE to address the issue of data contamination in AD." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.688, + 0.493, + 0.868 + ], + "angle": 0, + "content": "VAEs are another generative model widely used in AD tasks. Like GANs, VAEs aim to learn the distribution of normal data to identify anomalies. However, unlike GANs, which rely on adversarial training between a generator and a discriminator, VAEs use an encoder-decoder architecture. Fig.4 illustrates the structure of AD based on VAE. The goal of a VAE is to map the input data into a latent space through the encoder and model the data distribution probabilistically within this space. This approach allows the VAE to generate new data that closely resembles the true data distribution, and anomalies can be detected by evaluating the reconstruction error."
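+ }, + { + "type": "text", + "angle": 0, + "content": "As a concrete illustration of this pipeline, the following minimal PyTorch-style sketch encodes an input into a latent Gaussian, samples a latent \( z \) with the reparameterization trick, decodes a reconstruction, and trains by minimizing reconstruction error plus a KL regularizer (the negative of the objective in Eq. (3) below); at test time, the per-sample reconstruction error serves as the anomaly score. Layer sizes and names are illustrative assumptions, not the architecture of any cited model." + }, + { + "type": "code", + "angle": 0, + "content": "
import torch
import torch.nn as nn
import torch.nn.functional as F

class VAE(nn.Module):
    # Toy VAE: encoder outputs mean and log-variance of q(z|x); decoder
    # reconstructs x from a latent z sampled via the reparameterization trick.
    def __init__(self, dim=16, latent=4):
        super().__init__()
        self.enc = nn.Linear(dim, 32)
        self.mu = nn.Linear(32, latent)
        self.logvar = nn.Linear(32, latent)
        self.dec = nn.Sequential(nn.Linear(latent, 32), nn.ReLU(), nn.Linear(32, dim))

    def forward(self, x):
        h = F.relu(self.enc(x))
        mu, logvar = self.mu(h), self.logvar(h)
        z = mu + torch.exp(0.5 * logvar) * torch.randn_like(mu)  # reparameterize
        return self.dec(z), mu, logvar

def vae_loss(x, x_hat, mu, logvar):
    # Reconstruction term plus D_KL(q(z|x) || N(0, I)); minimizing this
    # corresponds to maximizing the objective in Eq. (3).
    recon = F.mse_loss(x_hat, x, reduction="sum")
    kl = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
    return recon + kl

torch.manual_seed(0)
model, x = VAE(), torch.randn(8, 16)
x_hat, mu, logvar = model(x)
score = ((x - x_hat) ** 2).sum(dim=1)    # per-sample anomaly score
print(vae_loss(x, x_hat, mu, logvar).item(), score)
"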
+ }, + { + "type": "text", + "bbox": [ + 0.074, + 0.87, + 0.493, + 0.947 + ], + "angle": 0, + "content": "The internal structure of a VAE is similar to that of a traditional AE but with some key differences. First, the encoder in a VAE not only compresses the input data into a lower-dimensional latent space but also learns a probabilistic distribution, typically parameterized by a mean \\(\\mu\\) and a vari" + }, + { + "type": "image", + "bbox": [ + 0.534, + 0.068, + 0.889, + 0.201 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.504, + 0.21, + 0.837, + 0.224 + ], + "angle": 0, + "content": "Fig. 4. Structural Frameworks for VAE Anomaly Detection." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.248, + 0.925, + 0.401 + ], + "angle": 0, + "content": "ance \\(\\sigma^2\\) as shown in Fig.4. This enables the VAE to generate more meaningful latent variables \\(z\\), enhancing the diversity and robustness of the generated data. A critical component introduced in VAEs is the Kullback-Leibler (KL) divergence, which measures the difference between the latent distribution generated by the encoder and a predefined prior distribution (usually a standard normal distribution). Unlike traditional AEs, which focus solely on minimizing the reconstruction error, VAEs are trained by minimizing a combination of the reconstruction error and the KL divergence:" + }, + { + "type": "equation", + "bbox": [ + 0.548, + 0.408, + 0.921, + 0.426 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\mathrm {V A E}} = \\mathbb {E} _ {q (z | x)} [ \\log p (x | z) ] - D _ {\\mathrm {K L}} (q (z | x) \\| p (z)). \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.432, + 0.925, + 0.914 + ], + "angle": 0, + "content": "This difference makes VAEs more powerful in AD because they not only consider the quality of the data reconstruction but also enforce a structured latent space through the KL divergence. By doing so, KL divergence helps to regularize the latent space, ensuring that the encoded representations are smoothly distributed and centered around the prior distribution. This regularization reduces overfitting, promotes better generalization, and makes it easier to distinguish between normal and anomalous data, especially in complex and high-dimensional datasets. Table III provides a comprehensive summary of the latest advancements in VAE-based AD models, showcasing innovative enhancements that address various challenges such as noise robustness, semantic feature learning, and anomaly reconstruction. Huang et al. [101] enhance VAE-based AD by incorporating an Autoencoding Transformation into the model, which ensures that the training phase effectively captures high-level visual semantic features of normal images, thereby increasing the anomaly score gap between normal and anomalous samples. Similarly, Yin et al. [102] utilize Convolutional Neural Network (CNN) and VAE with a two-stage sliding window approach in data preprocessing to learn better representations for AD tasks. Zhang Yin et al. [103] propose the Graph Relational Learning Network (GReLeN), which integrates a VAE structure with graph dependency learning for AD in multivariate time series through reconstruction. Zhou et al. [104] propose a variational long short-term memory (VLSTM) model for high-dimensional AD in imbalanced datasets, combining a compression network for efficient data representation with an estimation network for accurate classification of network traffic data. 
The VLSTM model balances data compression and feature retention using core LSTM and variational modules." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.915, + 0.922, + 0.946 + ], + "angle": 0, + "content": "In recent years, many advancements in AD models inspired by VAEs have focused on Adversarial Autoencoders (AAEs)" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.077, + 0.03, + 0.428, + 0.041 + ], + "angle": 0, + "content": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. X, XXX 2025" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.031, + 0.92, + 0.041 + ], + "angle": 0, + "content": "11" + }, + { + "type": "table_caption", + "bbox": [ + 0.362, + 0.071, + 0.635, + 0.094 + ], + "angle": 0, + "content": "TABLE II GAN-BASED MODELS IN ANOMALY DETECTION" + }, + { + "type": "table", + "bbox": [ + 0.078, + 0.103, + 0.933, + 0.585 + ], + "angle": 0, + "content": "
<table><tr><td>Paper</td><td>Technique</td><td>Approach Type</td><td>Strength</td><td>Weakness</td><td>Data Type</td><td>Year</td></tr>
<tr><td>[90]</td><td>GAN</td><td>Reconstruction</td><td>Does not depend on assumptions about the normal data and requires less computing resources.</td><td>The method involves the selection of multiple hyperparameters, making the tuning process challenging and potentially time-consuming.</td><td>Structured data</td><td>2020</td></tr>
<tr><td>[48]</td><td>GAN+CNN</td><td>Prediction</td><td>The NM-GAN model enhances both the generalization and discrimination abilities through noise-modulated adversarial learning, resulting in improved accuracy and stability for video AD.</td><td>The model struggles to fully capture complex temporal patterns like staying, wandering, and running, and lacks adaptive modulation of generalization and discrimination abilities, leaving room for improvement in spatiotemporal feature learning.</td><td>Video data</td><td>2021</td></tr>
<tr><td>[94]</td><td>GAN</td><td>Reconstruction</td><td>Is capable of generating more effective artificial samples for training supervised learning models, thereby addressing the issue of data imbalance.</td><td>Its performance is inferior to the baseline algorithms when the balanced ratio is 0.125.</td><td>Image data</td><td>2021</td></tr>
<tr><td>[95]</td><td>GAN+LSTM</td><td>Prediction</td><td>The TMANomaly framework excels in capturing complex multivariate correlations in industrial time series data, enhancing AD accuracy through mutual adversarial training.</td><td>The paper lacks discussion on TMANomaly's generalization to other datasets, the potential limitations of using GRA for feature selection, and the computational efficiency or scalability, which are critical for real-time industrial systems.</td><td>Multivariate time series data</td><td>2022</td></tr>
<tr><td>[96]</td><td>GAN+LSTM</td><td>Prediction</td><td>FGANomaly method effectively filters anomalous samples before training, improving AD accuracy and robustness by precisely capturing normal data distribution and dynamically adjusting generator focus.</td><td>The method lacks effective fusion of information across different dimensions in multivariate time series, which limits its ability to fully capture complex correlations.</td><td>Multivariate time series data</td><td>2022</td></tr>
<tr><td>[93]</td><td>GAN</td><td>Reconstruction</td><td>Improves the quality of the generated anomaly images and generates anomalies with different shapes, rotation angles, spatial locations, and numbers in a controllable manner.</td><td>The images generated are not very sensitive to the change of light.</td><td>Image data</td><td>2023</td></tr>
<tr><td>[89]</td><td>GAN</td><td>Reconstruction</td><td>Improves training efficiency and stability in dynamic graph AD while avoiding the expensive optimization process typical of traditional graph generative adversarial networks.</td><td>The detection accuracy on the UCI Message dataset is lower than that of TADDY.</td><td>Dynamic graph data</td><td>2023</td></tr>
<tr><td>[92]</td><td>GAN+Transformer</td><td>Reconstruction</td><td>It can effectively detect anomalies in long sequences, mitigates overfitting, and incorporates contrastive loss into the discriminator to fine-tune the GAN, ensuring strong generalization ability.</td><td>It may struggle with irregularly sampled data or datasets with many missing values, requires careful tuning of several hyperparameters, and demands significant computational resources, posing challenges for real-time processing on limited-capacity devices.</td><td>Multivariate time series data</td><td>2024</td></tr></table>
" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.611, + 0.492, + 0.943 + ], + "angle": 0, + "content": "[105]. Unlike traditional VAEs, which use KL divergence to match the latent space distribution to a prior, AAEs achieve this through the use of GANs. Specifically, AAEs employ a GAN's discriminator to evaluate the latent variable distribution produced by the encoder and use adversarial training to align it with the desired prior distribution, providing more flexible control over the quality of the generated data. Wu et al. [87] propose the Fault-Attention Generative Probabilistic Adversarial Autoencoder (FGPAA) for machine AD, utilizing an end-to-end AAE with double discriminators to extract relevant features and ensure accurate equipment health monitoring through a fault-attention probability distribution. Idrissi et al. [51] apply AAE and FL in the field of network intrusion detection, effectively ensuring AD performance while safeguarding client privacy. Experimental results demonstrate that the proposed model outperforms AE, VAE, and AAE on various network traffic datasets, achieving high performance across different metrics. Su et al. [106] propose two contamination-immune BiGAN models, integrating elements of VAE and BiGAN to create a new AAE-based framework that effectively detects anomalies by learning the probability distribution of normal samples from contaminated datasets, significantly outperform" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.611, + 0.923, + 0.775 + ], + "angle": 0, + "content": "ing state-of-the-art methods in scenarios where training data is impure. Similar to the aforementioned AAE models, Du et al. use GANs to purify the original dataset, generating synthetic \"normal\" data to improve outlier detection accuracy. Continuing the advancements in AAE-based models, Yu et al. [107] introduce an Adversarial Contrastive Autoencoder (ACAE) for Multivariate Time Series (MTS) AD, which enhances feature representation through adversarial training and contrastive learning, demonstrating superior performance across multiple real-world datasets, further extending the application of AAE-based methods in robust AD." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.778, + 0.923, + 0.883 + ], + "angle": 0, + "content": "3) Diffusion model-Based for Anomaly Detection: Diffusion models are a type of generative model that operate through two key phases: a fixed forward diffusion process and a learnable reverse diffusion process [108]. Mathematically, the forward process involves progressively adding Gaussian noise to the data \\( x_0 \\), transforming it into pure noise \\( x_T \\) over \\( T \\) steps. This process can be described as:" + }, + { + "type": "equation", + "bbox": [ + 0.575, + 0.889, + 0.922, + 0.908 + ], + "angle": 0, + "content": "\\[\nq \\left(x _ {t} \\mid x _ {t - 1}\\right) = \\mathcal {N} \\left(x _ {t}; \\sqrt {1 - \\beta_ {t}} x _ {t - 1}, \\beta_ {t} I\\right), \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.915, + 0.922, + 0.946 + ], + "angle": 0, + "content": "where \\( q(x_{t}|x_{t - 1}) \\) is the conditional probability distribution of \\( x_{t} \\) given \\( x_{t - 1} \\), \\( \\beta_{t} \\) is the noise variance at step \\( t \\), and \\( x_{t} \\)" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.077, + 0.03, + 0.428, + 0.042 + ], + "angle": 0, + "content": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. 
X, XXX 2025" + }, + { + "type": "page_number", + "bbox": [ + 0.908, + 0.031, + 0.921, + 0.041 + ], + "angle": 0, + "content": "12" + }, + { + "type": "table_caption", + "bbox": [ + 0.334, + 0.071, + 0.665, + 0.094 + ], + "angle": 0, + "content": "TABLE III AUTOENCODER-BASED MODELS IN ANOMALY DETECTION" + }, + { + "type": "table", + "bbox": [ + 0.082, + 0.103, + 0.916, + 0.606 + ], + "angle": 0, + "content": "
Paper | Technique | Approach Type | Strength | Weakness | Data Type | Year
[104] | VAE-based (VAE+LSTM) | Reconstruction | Effectively addresses imbalanced and high-dimensional challenges in industrial big data. | Falls short of the highest AUC and F1 scores compared with other methods. | Industrial big data | 2020
[87] | AAE-based | Reconstruction | FGPAA reduces information loss during feature extraction and constructs fault-attention anomaly indicators using low-dimensional feature probability and reconstruction error. | Runtime is approximately five times longer than SOM. | Rotating machine fault simulator data | 2020
[98] | AE-based (AE+CNN) | Reconstruction | The Auto-AD method enables fully autonomous hyperspectral AD, automatically separating anomalies based on reconstruction errors without manual tuning or additional processing. | Lower AUC score than the GRX method on the Honghu dataset. | Hyperspectral data | 2021
[99] | AE-based (AE+CNN) | Reconstruction | MSNet offers an effective solution for multiscale anomaly shapes, providing greater flexibility without threshold fine-tuning. | Multiple convolutional encoder-decoder layers and enhanced training increase computational cost and training time. | Hyperspectral data | 2021
[101] | VAE-based (VAE+Transformer) | Reconstruction | SSR-AE leverages self-supervised learning to enhance normal-data reconstruction and hinder abnormal-data reconstruction, optimizing mutual information for effective transformation and image reconstruction. | Struggles with transformations, relying heavily on their effectiveness for AD. | Image data | 2021
[97] | AE-based | Reconstruction | Maintains the geometric structure and local spatial coherence of hyperspectral images (HSI), reducing search space and execution time per pixel. | High execution time for constructing the SuperGraph matrix on large datasets. | Hyperspectral data | 2021
[51] | AAE-based (AAE+Federated learning) | Reconstruction | Fed-ANIDS demonstrates strong generalization, outperforms GAN-based models, and ensures privacy protection through federated learning. | Computational overhead from the federated learning framework increases training complexity and latency. | Cybersecurity data | 2023
[100] | AE-based | Reconstruction | Applicable to time series AD under data contamination. | Assumes normal samples follow a Gaussian distribution, limiting applicability, and has higher computational complexity. | Time series data | 2024
[106] | AAE-based | Reconstruction | Learns the probability distribution of normal samples from contaminated datasets, achieving convergence and outperforming baseline models. | Relies on the assumption that the contamination ratio is known, which may not hold in practice. | Medical image data | 2024
[86] | AAE-based | Reconstruction | Generates a clean dataset from contaminated data for AD, with linear scalability to larger datasets. | Struggles with detection accuracy on datasets with multiple distribution patterns. | Tabular data | 2024
[107] | AAE-based | Reconstruction | Excels at learning high-level semantic features and capturing normal patterns of MTS with contrastive learning constraints, ensuring stability across parameter settings. | Performance on all metrics for the SMAP and PSM datasets is lower than baseline methods. | Multivariate time series data | 2024
" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.633, + 0.492, + 0.679 + ], + "angle": 0, + "content": "represents the noisy data at step \\( t \\). As \\( t \\) increases, the data becomes more corrupted by noise until it reaches a state of pure Gaussian noise at step \\( T \\)." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.686, + 0.491, + 0.779 + ], + "angle": 0, + "content": "The reverse process learns to gradually denoise the data, removing the added noise step by step. The model learns a parameterized distribution \\( p_{\\theta}(x_{t - 1}|x_t) \\) to reverse the noise addition process, reconstructing the original data from the noisy data. This reverse process is trained to minimize the variational bound on the data likelihood, expressed as:" + }, + { + "type": "equation", + "bbox": [ + 0.13, + 0.785, + 0.49, + 0.803 + ], + "angle": 0, + "content": "\\[\nL = \\mathbb {E} _ {q} \\left[ D _ {K L} \\left(q \\left(x _ {t - 1} \\mid x _ {t}, x _ {0}\\right) \\mid p _ {\\theta} \\left(x _ {t - 1} \\mid x _ {t}\\right)\\right) \\right]. \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.809, + 0.492, + 0.947 + ], + "angle": 0, + "content": "By progressively removing noise, diffusion models generate high-fidelity samples, first capturing coarse structures and then refining details in each step. In the context of AD, diffusion models are trained on normal data to learn the underlying data distribution through an iterative noise-removal process. Similar to other reconstruction-based methods, anomalies can be identified by evaluating the reconstruction error, where a higher error indicates that the data deviates from the learned normal patterns." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.632, + 0.923, + 0.755 + ], + "angle": 0, + "content": "Diffusion models stand out from GANs and VAEs in several key ways. They avoid common issues such as mode collapse in GANs, where only a subset of the data distribution is captured, leading to reduced diversity. Diffusion models also overcome the blurriness associated with VAEs, which often results from pixel-based loss and a smaller latent space. By iteratively denoising data, diffusion models maintain both high fidelity and diversity in their outputs." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.764, + 0.923, + 0.947 + ], + "angle": 0, + "content": "While diffusion models are slower in generating samples due to their iterative nature, their ability to accurately reconstruct data and cover the full range of the training dataset makes them particularly well-suited for AD [109]. In AD, where precision is critical, diffusion models excel by generating detailed and high-quality samples, enabling them to identify subtle deviations from normal patterns with greater accuracy than other generative models. Several works have leveraged the advantages of diffusion models in ADn. For example, Zhang et al. [110] utilize the high-quality and diverse image generation capabilities of diffusion models to enhance reconstruction quality in DiffAD, addressing the limitations of" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.077, + 0.03, + 0.428, + 0.042 + ], + "angle": 0, + "content": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. 
X, XXX 2025" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.031, + 0.921, + 0.041 + ], + "angle": 0, + "content": "13" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.07, + 0.493, + 0.434 + ], + "angle": 0, + "content": "traditional methods by introducing noisy condition embedding and interpolated channels. Similarly, Li et al. [111] apply a diffusion model to reconstruct normal data distributions and integrate an auxiliary learning module with pretext tasks to better distinguish between normal and abnormal data. Expanding on these ideas, Zeng et al. [112] improve denoising diffusion probabilistic models (DDPMs) for radio AD by incorporating an AE to learn the distribution of normal signals and their power spectral density (PSD), using reconstruction error to identify anomalies. Li et al. [113] present a Controlled Graph Neural Network (ConGNN) approach based on DDPMs to address the challenge of limited labeled data. Li et al. [114] further explore diffusion models in vehicle trajectory AD, employing decoupled Transformer-based encoders to capture temporal dependencies and spatial interactions among vehicles, significantly improving AUC and F1 scores on real-world and synthetic datasets. Similarly, Pei et al. [115] establish the two-stage diffusion model (TSDM) to mitigate the influences of anomalies in smart grids, where the first stage is a diffusion-based AD component. In multi-class AD, He et al. [116] propose DiAD, a framework that enhances reconstruction accuracy through a combination of a semantic-guided network, spatial-aware feature fusion, and a pre-trained feature extractor to generate anomaly maps." + }, + { + "type": "title", + "bbox": [ + 0.075, + 0.445, + 0.493, + 0.474 + ], + "angle": 0, + "content": "B. Deep learning methods for Anomaly Detection based on Prediction" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.478, + 0.495, + 0.884 + ], + "angle": 0, + "content": "Prediction-based AD methods operate by forecasting future values or estimating missing attributes and comparing these predictions to the actual observed values. When significant deviations occur, it indicates potential anomalies, as the data deviates from the learned normal patterns. These methods are versatile and can be applied across various data types, leveraging relationships between variables or temporal correlations to detect anomalies. Prediction-based methods excel in scenarios where capturing patterns and trends is essential. By learning underlying structures in the data, whether based on time dependencies or more general interactions between variables, these methods can effectively predict expected outcomes. Deviations from these expectations are flagged as anomalies. This makes prediction-based approaches highly adaptable, capable of functioning across different contexts, including various types of data. In this section, we explore three main approaches for prediction-based AD: Recurrent Neural Networks (RNNs), attention mechanisms, and Graph Neural Networks (GNNs), all of which have demonstrated efficacy in capturing intricate patterns and relationships within data to identify anomalies. These methods allow for flexible and robust AD across various data types by learning underlying patterns, whether they are based on spatial, temporal, or graph-based relationships. By leveraging these approaches, prediction-based methods can effectively model complex interactions, providing reliable detection of unexpected behaviors or deviations from learned patterns." 
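To make the prediction-based criterion just described concrete, the following minimal sketch (our illustration, not drawn from any surveyed paper) fits a linear autoregressive predictor on data assumed to be normal and flags time steps whose prediction residual exceeds a threshold; the window length W, the synthetic signal, and the 3-sigma threshold are all illustrative assumptions.

```python
import numpy as np

W = 16  # look-back window length (illustrative choice)

def windows(series, w=W):
    """Slice a 1-D series into (num_windows, w) inputs and next-step targets."""
    X = np.stack([series[i:i + w] for i in range(len(series) - w)])
    return X, series[w:]

# 1) Fit a linear autoregressive predictor on data assumed to be normal.
rng = np.random.default_rng(0)
t = np.arange(2000)
series = np.sin(0.05 * t) + 0.05 * rng.standard_normal(t.size)
series[1700] += 1.5                      # inject one point anomaly for the demo
X_tr, y_tr = windows(series[:1500])
coef, *_ = np.linalg.lstsq(X_tr, y_tr, rcond=None)

# 2) Score every step by the absolute prediction residual.
X_all, y_all = windows(series)
residual = np.abs(X_all @ coef - y_all)

# 3) Flag steps whose residual exceeds a 3-sigma threshold estimated
#    on the training span (the learned "normal" behavior).
train_res = residual[:1500 - W]
threshold = train_res.mean() + 3 * train_res.std()
print(np.flatnonzero(residual > threshold) + W)   # includes index 1700
```

The RNN, attention, and GNN methods surveyed next replace the linear predictor with learned sequence models, but the detection logic (predict, compare, threshold) is the same.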
+ }, + { + "type": "text", + "bbox": [ + 0.074, + 0.885, + 0.493, + 0.947 + ], + "angle": 0, + "content": "1) RNN-based Anomaly Detection: Recurrent Neural Networks (RNNs) [117] are a special type of neural network designed to process sequential data by capturing dependencies between elements in a sequence. Unlike standard neural" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.069, + 0.923, + 0.19 + ], + "angle": 0, + "content": "networks, RNNs incorporate a state vector \\( s_t \\) in the hidden layer, allowing them to retain information from previous steps and model sequential patterns. This capability makes them effective in various applications where data has an inherent order, such as event logs, system monitoring, and structured sequences in cybersecurity or industrial processes. For an input \\( x_t \\) at time \\( t \\), the update of the state value \\( s_t \\) and hidden layer output \\( h_t \\) in RNNs can be represented as" + }, + { + "type": "equation", + "bbox": [ + 0.6, + 0.196, + 0.921, + 0.22 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {s} _ {t} = \\sigma \\left(\\boldsymbol {W} ^ {x} \\boldsymbol {x} _ {t} + \\boldsymbol {W} ^ {s} \\boldsymbol {s} _ {t - 1} + \\boldsymbol {b} ^ {s}\\right) \\tag {6}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.601, + 0.217, + 0.792, + 0.233 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {h} _ {t} = \\operatorname {s o f t m a x} \\left(\\boldsymbol {W} ^ {h} \\boldsymbol {s} _ {t} + \\boldsymbol {b} ^ {h}\\right),\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.239, + 0.922, + 0.315 + ], + "angle": 0, + "content": "where \\(\\sigma(\\cdot)\\) is the sigmoid activation function, \\(W^x\\), \\(W^s\\) and \\(W^h\\) represent the network weights, and \\(b\\) is the network biases. By maintaining a recurrent state, RNNs can effectively capture dependencies across different steps within a sequence, making them well-suited for tasks involving ordered data." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.317, + 0.922, + 0.424 + ], + "angle": 0, + "content": "However, RNNs face the problem of exploding or vanishing gradients when dealing with long sequences. Long Short-Term Memory networks (LSTMs) [118], a specialized type of RNN, were introduced to address these issues. Specifically, LSTMs replace the hidden layer of RNNs with an LSTM block consisting of input, output, and forget gates. 
The inference process of LSTM at time \\( t \\) is given by" + }, + { + "type": "equation", + "bbox": [ + 0.584, + 0.429, + 0.83, + 0.448 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {f} _ {t} = \\sigma \\left(\\boldsymbol {W} ^ {x f} \\boldsymbol {x} _ {t} + \\boldsymbol {W} ^ {h f} \\boldsymbol {h} _ {t - 1} + \\boldsymbol {b} ^ {f}\\right)\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.586, + 0.449, + 0.821, + 0.468 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {i} _ {t} = \\sigma \\left(\\boldsymbol {W} ^ {x i} \\boldsymbol {x} _ {t} + \\boldsymbol {W} ^ {h i} \\boldsymbol {h} _ {t - 1} + \\boldsymbol {b} ^ {i}\\right)\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.585, + 0.468, + 0.921, + 0.493 + ], + "angle": 0, + "content": "\\[\n\\tilde {\\boldsymbol {c}} _ {t} = \\tanh \\left(\\boldsymbol {W} ^ {x \\tilde {c}} \\boldsymbol {x} _ {t} + \\boldsymbol {W} ^ {h \\tilde {c}} \\boldsymbol {h} _ {t - 1} + \\boldsymbol {b} ^ {\\tilde {c}}\\right) \\tag {7}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.585, + 0.49, + 0.715, + 0.505 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {c} _ {t} = \\boldsymbol {f} _ {t} \\boldsymbol {c} _ {t - 1} + \\boldsymbol {i} _ {t} \\tilde {\\boldsymbol {c}} _ {t}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.584, + 0.508, + 0.826, + 0.526 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {o} _ {t} = \\sigma \\left(\\boldsymbol {W} ^ {x o} \\boldsymbol {x} _ {t} + \\boldsymbol {W} ^ {h o} \\boldsymbol {h} _ {t - 1} + \\boldsymbol {b} ^ {o}\\right)\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.583, + 0.528, + 0.706, + 0.544 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {h} _ {t} = \\boldsymbol {o} _ {t} \\tanh \\left(\\boldsymbol {c} _ {t}\\right),\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.549, + 0.923, + 0.732 + ], + "angle": 0, + "content": "where \\( f_{t} \\), \\( i_{t} \\), and \\( o_{t} \\) are the forget, input and output gate weights, respectively. \\( c_{t} \\) represents the cell state of LSTM, and \\( \\tanh(\\cdot) \\) is the hyperbolic tangent activation function. By controlling the weights of the forget, input, and output gates, LSTM determines the importance of historical time series information and the current input on the current output, thus effectively mitigating issues of gradient vanishing and allowing robust modeling of complex sequences. Reference [119] provides comprehensive evidence of LSTM's effectiveness in AD across various technical systems, demonstrating its superiority in learning complex temporal behaviors and accurately identifying anomalies." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.733, + 0.923, + 0.945 + ], + "angle": 0, + "content": "The Gated Recurrent Unit (GRU) [120] is a simplified version of LSTM that only includes an update gate and a reset gate and uses the hidden state alone to represent both short-term and long-term information. These different types of RNNs can be used in prediction-based AD tasks, with the specific detection and inference method illustrated in Fig. 5. RNNs, LSTMs, and GRUs take time series data from \\( t - w \\) to \\( t - 1 \\) as input, and their pre-trained neural networks use these temporally ordered data to predict the single-step or multi-step future values of the univariate or multivariate time series. 
If the difference between the actual and predicted values is below a threshold, no anomaly is detected; if the difference exceeds the threshold, an anomaly is detected and the spatiotemporal location of the anomaly is identified." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.077, + 0.03, + 0.428, + 0.042 + ], + "angle": 0, + "content": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. X, XXX 2025" + }, + { + "type": "page_number", + "bbox": [ + 0.908, + 0.031, + 0.921, + 0.041 + ], + "angle": 0, + "content": "14" + }, + { + "type": "table_caption", + "bbox": [ + 0.346, + 0.071, + 0.654, + 0.095 + ], + "angle": 0, + "content": "TABLE IV DIFFUSION-BASED MODELS IN ANOMALY DETECTION" + }, + { + "type": "table", + "bbox": [ + 0.098, + 0.104, + 0.904, + 0.424 + ], + "angle": 0, + "content": "
Paper | Technique | Approach Type | Strength | Weakness | Data Type | Year
[110] | Diffusion | Reconstruction | The latent diffusion model (LDM) used in this method achieves state-of-the-art performance in surface AD by generating high-quality, semantically correct reconstructions, effectively avoiding overfitting to anomalies. | Less suitable for real-time applications or environments with limited computational resources. | Image data | 2023
[112] | Diffusion+VAE | Reconstruction | The AE-DDPMs algorithm effectively improves stability and reduces computational costs in radio AD, outperforming GAN-based methods in complex electromagnetic environments. | The anomalies in the experimental data are artificially generated rather than drawn from real-world conditions, which may limit the model's applicability to genuine scenarios. | Radio signal data | 2023
[113] | Diffusion+GNN | Prediction | ConGNN effectively addresses the issue of limited labeled data by generating augmented graph data using a graph-specific diffusion model. | The reliance on graph-specific augmentation might not generalize well to other data types, potentially limiting applicability beyond graph-based AD. | Image data | 2023
[111] | Diffusion+VAE | Hybrid | SDAD effectively enhances AD by combining self-supervised learning for discriminative data representation with denoising diffusion. | Pseudo-anomaly generation relies solely on standard Gaussian sampling, which may not fully capture the complexity of real anomalies, limiting the model's ability to simulate genuine abnormal data. | Structured data | 2024
[114] | Diffusion+Transformer | Hybrid | DiffTAD effectively models temporal dependencies and spatial interactions in vehicle trajectories through diffusion models, significantly improving AD accuracy and robustness to noise. | Anomalies are evaluated primarily on synthetic datasets, which may not fully reflect the complexity and diversity of real-world trajectory data. | Vehicle trajectory data | 2024
" + }, + { + "type": "image", + "bbox": [ + 0.078, + 0.428, + 0.922, + 0.555 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.074, + 0.565, + 0.784, + 0.58 + ], + "angle": 0, + "content": "Fig. 5. RNN-based application example for time series data anomaly detection: (a) RNN-based, (b) LSTM-based, (c) GRU-based." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.597, + 0.494, + 0.946 + ], + "angle": 0, + "content": "Current RNN-based AD primarily focuses on improving RNN algorithms tailored to AD tasks and integrating RNN with other methods for AD. The method in [121] employs a pruning algorithm to reduce the number of false data points, enabling the LSTM-based AD approach to better address the challenges posed by the extremely uneven distribution of railway traffic data. LSTM combined with AE [122], VAE [123], and Singular Value Decomposition (SVD) [124] has also been used to identify anomalies in Controller Area Networks (CANs) [125], electrocardiograms, and Internet monitoring data. GANs based on adversarial learning have also been integrated into the time series learning of LSTM, achieving very high performance in scenarios with few features [95], extremely imbalanced training sets, and noise interference [96]. CNN is also integrated into LSTM in a serial [126], parallel [127], or as a foundational layer [128] to better extract the spatiotemporal correlations of multidimensional time series, thereby enhancing the performance of AD. GRUs, compared to LSTMs, have a more streamlined architecture, resulting in lower computational complexity during training and execution of AD tasks, and they tend to perform better on certain less complex sequential data. For instance, GRUs enhance interpretability by uncovering latent correlations in" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.597, + 0.921, + 0.671 + ], + "angle": 0, + "content": "multivariate time series data from industrial control system sensors [129]. Similar to LSTMs, GRUs can also be combined with AEs [130] or VAEs [25] in an encoder-decoder architecture to mitigate the effects of noise and anomalies, thereby improving the accuracy of AD." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.673, + 0.922, + 0.884 + ], + "angle": 0, + "content": "2) Attention-based Anomaly Detection: The attention mechanism was initially applied in machine translation [131], with its core idea being to enable the neural network to focus on the relevant parts of the input values. While attention-based methods have shown great promise in time series AD, their applications are not limited to temporal data. These methods can effectively capture dependencies in various types of data, including spatial, spatiotemporal, and multimodal datasets. This flexibility broadens their use cases across different AD tasks. Compared to RNN-based approaches, they are better suited for long or complex sequences because attention can compute dependencies between all positions in the sequence simultaneously, while RNNs process sequences sequentially, step by step." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.885, + 0.922, + 0.947 + ], + "angle": 0, + "content": "Figure 6 illustrates a typical attention-based model for AD. 
Among attention-based methods, the self-attention mechanism is particularly effective in capturing global dependencies across various types of sequential data, including temporal," + } + ], + [ + { + "type": "header", + "bbox": [ + 0.077, + 0.03, + 0.428, + 0.042 + ], + "angle": 0, + "content": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. X, XXX 2025" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.031, + 0.921, + 0.041 + ], + "angle": 0, + "content": "15" + }, + { + "type": "image", + "bbox": [ + 0.12, + 0.066, + 0.868, + 0.212 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.074, + 0.221, + 0.924, + 0.268 + ], + "angle": 0, + "content": "Fig. 6. Attention-based model for anomaly detection. The model first embeds sequential data using input embedding and positional encoding to preserve temporal dependencies. The multi-head attention mechanism captures long-range dependencies by processing interactions between all time steps. The feedforward layer then refines feature representations, and a dense interpolation layer enhances anomaly-related features before passing them to a fully connected network (FNN) for final AD." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.286, + 0.492, + 0.363 + ], + "angle": 0, + "content": "spatial, and spatiotemporal inputs. For an input dataset \\( \\mathbf{X} = [x_{1}, x_{2}, \\dots, x_{t}] \\), the queries, keys, and values are defined as: \\( Q = X W_{Q} \\), \\( K = X W_{K} \\), and \\( V = X W_{V} \\), where \\( W_{Q} \\), \\( W_{K} \\), and \\( W_{V} \\) are trainable weight matrices. The attention weights are then computed based on \\( Q \\), \\( K \\), and \\( V \\) as" + }, + { + "type": "equation", + "bbox": [ + 0.168, + 0.367, + 0.491, + 0.408 + ], + "angle": 0, + "content": "\\[\n\\alpha_ {i j} = \\frac {\\exp \\left(\\boldsymbol {Q} _ {i} \\boldsymbol {K} _ {j} ^ {\\top} / \\sqrt {\\boldsymbol {d} _ {k}}\\right)}{\\sum_ {j = 1} ^ {T} \\exp \\left(\\boldsymbol {Q} _ {i} \\boldsymbol {K} _ {j} ^ {\\top} / \\sqrt {\\boldsymbol {d} _ {k}}\\right)}, \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.412, + 0.492, + 0.474 + ], + "angle": 0, + "content": "where \\( d_k \\) is the dimension of the keys. Finally, the output of the self-attention-based neural network, which takes into account the importance of each input value, is given by Attention \\( (Q, K, V) = \\alpha V \\)." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.478, + 0.491, + 0.524 + ], + "angle": 0, + "content": "To enable the model to capture features of various patterns, multi-head attention is also well-suited for AD. The calculation of multiple heads is expressed as" + }, + { + "type": "equation", + "bbox": [ + 0.09, + 0.531, + 0.49, + 0.562 + ], + "angle": 0, + "content": "\\[\n\\operatorname {M u l t i h e a d} \\left(\\boldsymbol {Q}, \\boldsymbol {K}, \\boldsymbol {V}\\right) = \\operatorname {C o n c a t} \\left(\\operatorname {h e a d} _ {1}, \\dots , \\operatorname {h e a d} _ {h}\\right) \\boldsymbol {W} _ {O}, \\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.563, + 0.493, + 0.743 + ], + "angle": 0, + "content": "where each head is computed as \\(\\mathrm{head}_i =\\) Attention \\((\\mathbf{Q}\\mathbf{W}_{Q_i},\\mathbf{K}\\mathbf{W}_{K_i},\\mathbf{V}\\mathbf{W}_{V_i})\\) . Here, \\(W_{Q_i}\\) \\(W_{K_i}\\) and \\(W_{V_i}\\) are trainable parameters for different heads, and \\(W_{O}\\) is the linear transformation matrix for the output. 
\\( \\operatorname{Concat}(\\mathrm{head}_1, \\dots, \\mathrm{head}_h) \\) concatenates the outputs of all attention heads along the feature dimension. Attention-based methods can effectively capture long-term dependencies, improve computational efficiency, and enhance the interpretability of AD through visualized attention weight values. When applied to AD, differences in the distribution of attention weights between normal and anomalous time series can serve as the basis for AD." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.749, + 0.492, + 0.947 + ], + "angle": 0, + "content": "In the field of AD, particularly for time series data, a growing number of studies have proposed deep learning methods based on attention mechanisms. Autoencoders that combine convolution, LSTM, and self-attention mechanisms can better extract complex features from multivariate time series data and robustly detect anomalies in high noise conditions [132]. The Transformer, as a well-known attention-based model, has demonstrated superior performance in unsupervised prediction-based time series AD compared to LSTM, as it can learn the dynamic patterns of sequential data through self-attention mechanisms [133]. The Transformer-based AD utilizes attention-based sequence encoders for rapid inference, achieving an F1 score improvement of up to \\(17\\%\\)" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.286, + 0.923, + 0.424 + ], + "angle": 0, + "content": "on public datasets and reducing training time by as much as \\(99\\%\\) compared to the baseline [134]. Despite its outstanding capabilities, the Transformer still faces certain bottlenecks in AD. Attention-based methods are prone to overfitting when data is insufficient. The method in [92] seamlessly integrates contrastive learning and GAN into the Transformer, utilizing data augmentation techniques and geometric distribution masking to expand the training data, thereby enhancing data diversity and improving accuracy by \\(9.28\\%\\)." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.427, + 0.923, + 0.775 + ], + "angle": 0, + "content": "Attention mechanisms are also frequently applied in graph neural networks to jointly detect anomalies in time series data. Reference [135] proposes a novel efficient Transformer model based on graph learning methods, employing two-stage adversarial training to train the AD model and utilizing prototypical networks to apply the model to anomaly classification. A contrastive time-frequency reconstruction network is used for unsupervised AD and localization [136], where attention mechanisms and graph convolutional networks update the feature information of each time point, combining points with similar feature relationships to dilute the influence of anomalous points on normal points. Reference [137] models the correlations between temporal variables using graph convolutional networks, while also using an attention-based reconstruction model to output the importance of time series data within each time window, achieving an average AD F1 score exceeding 0.96. For multimodal data, a multimodal graph attention network (M-GAT) and temporal convolutional networks are used to capture spatial-temporal correlations in multimodal time series and correlations between modalities [138], ultimately outputting anomaly scores through reconstruction or prediction. More details about the application of GNNs in AD will be elaborated in the next subsection."
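As a concrete companion to Eqs. (8) and (9), the following NumPy sketch computes scaled dot-product self-attention and the multi-head combination; the shapes, random projection matrices, and head count are illustrative assumptions rather than settings from any cited model. The returned attention map is the quantity whose distribution shift between normal and anomalous sequences can serve as an AD signal, as noted above.

```python
import numpy as np

def self_attention(X, Wq, Wk, Wv):
    """Scaled dot-product self-attention, Eq. (8): alpha = softmax(Q K^T / sqrt(d_k))."""
    Q, K, V = X @ Wq, X @ Wk, X @ Wv
    scores = Q @ K.T / np.sqrt(K.shape[-1])
    scores -= scores.max(axis=-1, keepdims=True)        # numerical stability
    alpha = np.exp(scores) / np.exp(scores).sum(axis=-1, keepdims=True)
    return alpha @ V, alpha                             # Attention(Q, K, V) = alpha V

def multi_head(X, heads, Wo):
    """Multi-head attention, Eq. (9): concatenate head outputs, then project by W_O."""
    outs = [self_attention(X, Wq, Wk, Wv)[0] for Wq, Wk, Wv in heads]
    return np.concatenate(outs, axis=-1) @ Wo

# Illustrative usage: T = 8 time steps, d = 4 features, h = 2 heads of width 4.
rng = np.random.default_rng(0)
T, d, h, dk = 8, 4, 2, 4
X = rng.standard_normal((T, d))
heads = [tuple(rng.standard_normal((d, dk)) for _ in range(3)) for _ in range(h)]
Wo = rng.standard_normal((h * dk, d))
out = multi_head(X, heads, Wo)            # (T, d) refined representation
_, alpha = self_attention(X, *heads[0])
print(out.shape, alpha.shape)             # the (T, T) map alpha holds the attention
                                          # weights whose distribution is examined for AD
```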
+ }, + { + "type": "text", + "bbox": [ + 0.503, + 0.779, + 0.923, + 0.947 + ], + "angle": 0, + "content": "In addition to GNNs, CNNs can also incorporate attention mechanisms to enhance various metrics of AD. Reference [139] effectively captures the local features of subsequences by leveraging the locality of CNNs and combining it with positional embeddings. At the same time, Zhu et al. [139] employ attention mechanisms to extract global features from the entire time series, thereby enhancing the effectiveness and potential of detection. Many works have also introduced LSTM to extract temporal correlations in time series data based on CNN models with attention mechanisms. For example, Sun et al. [140] employ a sequential approach where 1D convolution is" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.077, + 0.03, + 0.428, + 0.042 + ], + "angle": 0, + "content": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. X, XXX 2025" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.031, + 0.921, + 0.041 + ], + "angle": 0, + "content": "16" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.07, + 0.493, + 0.266 + ], + "angle": 0, + "content": "first used to extract abstract features of the signal values at each time step, which are then input into a bidirectional long short-term memory network (Bi-LSTM), ultimately combining with attention mechanisms to make the model focus on locally important time steps. Meanwhile, Le et al. [141] integrate convolutional layers, LSTM layers, and self-attention layers into an autoencoder architecture to better extract complex features from multivariate time series. Similarly, Pei et al. [126] employ additional SVM to classify the attention weights based on a CNN-LSTM model with attention mechanisms to determine whether cyber-attacks have occurred in energy systems. The input data are the multimodal measurements from the deployed sensors." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.269, + 0.493, + 0.603 + ], + "angle": 0, + "content": "3) GNN-based Anomaly Detection: Graph Neural Networks (GNNs) have gained increasing attention in AD tasks, as many types of data can be naturally represented as graph structures [142]. Wu et al. [143] have demonstrated the effectiveness of GNNs in identifying anomalies within complex graph-structured data environments. As neural network models specifically designed to handle graph-structured data, GNNs define nodes, edges, and graphs, where nodes represent individual elements in the dataset, such as data points in a sequence, sensor readings in multivariate data, or entities in relational datasets—denoted as the set \\( V \\). Edges capture the relationships or dependencies between these elements, denoted as the set \\( E \\), and can represent temporal correlations, spatial dependencies, or more abstract relational connections depending on the context. The graph, represented as \\( G = (V, E) \\), captures the overall structure formed by nodes and edges. The primary operations in GNN training are message passing and aggregation, which are used to update and learn node features. Specifically, during message passing, each node receives information from its neighboring nodes and updates its own state. 
For a node \\( v \\), the message passing formula is given as" + }, + { + "type": "equation", + "bbox": [ + 0.13, + 0.607, + 0.491, + 0.645 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {m} _ {v} ^ {(k)} = \\sum_ {u \\in \\mathcal {N} (v)} \\operatorname{MSG} \\left(\\boldsymbol {h} _ {u} ^ {(k - 1)}, \\boldsymbol {h} _ {v} ^ {(k - 1)}, \\boldsymbol {e} _ {u v}\\right), \\tag {10}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.649, + 0.493, + 0.725 + ], + "angle": 0, + "content": "where \\(\\mathcal{N}(v)\\) denotes the set of neighboring nodes of \\(v\\), \\(h_u\\) and \\(h_v\\) are the features of nodes \\(u\\) and \\(v\\) at layer \\(k\\), and \\(e_{uv}\\) represents the edge features. Subsequently, the received messages are aggregated with the current node state, and the node features are updated as" + }, + { + "type": "equation", + "bbox": [ + 0.165, + 0.731, + 0.49, + 0.757 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {h} _ {v} ^ {(k)} = \\operatorname{UPDATE} \\left(\\boldsymbol {h} _ {v} ^ {(k - 1)}, \\boldsymbol {m} _ {v} ^ {(k)}\\right), \\tag {11}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.761, + 0.373, + 0.777 + ], + "angle": 0, + "content": "where \\( \\operatorname{UPDATE}(\\cdot, \\cdot) \\) is the update function." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.779, + 0.493, + 0.947 + ], + "angle": 0, + "content": "As illustrated in Fig. 7, which uses time series data as an example, GNNs treat each variable in the multivariate time series as a node to capture complex relationships between different dimensions. While the primary focus here is on the predictive capabilities of GNNs, it is worth noting that they are also effective in reconstruction-based AD. The final decision on whether the input sequence is anomalous is primarily based on prediction errors or graph structure differences, with reconstruction errors serving as a supplementary indicator. GNN-based AD methods excel at modeling complex dependencies between time steps or sensors, offering flexibility to handle" + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.063, + 0.92, + 0.279 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.503, + 0.291, + 0.922, + 0.361 + ], + "angle": 0, + "content": "Fig. 7. GNN-based method for anomaly detection with time series data. Time series data is embedded into a graph structure, where a spatial-temporal GNN extracts dependencies. The reconstruction module then estimates the original data. Anomalies are detected based on graph relational discrepancies (differences in predicted graph structure) and prediction discrepancies (differences between reconstructed and actual time series)." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.379, + 0.921, + 0.454 + ], + "angle": 0, + "content": "both static and dynamic relationships across diverse time series structures. However, they still face challenges such as high computational complexity on large-scale graphs and difficulties in constructing optimal edge and graph configurations [144]." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.455, + 0.922, + 0.711 + ], + "angle": 0, + "content": "In prediction-based GNN for AD, GDN [145] is a representative work that combines a structure learning approach with GNN, additionally using attention weights to predict time series values and detect anomalies based on the predictions. Similar methods include GTA [146] and CST-GL [147]. Furthermore, Liu et al. 
[148] propose a GNN-based contrastive learning model that generates prediction scores from high-dimensional attributes and local structures to detect anomalies, outperforming state-of-the-art methods on seven benchmark datasets. Beyond prediction-based methods, there are also reconstruction-based GNN approaches. For example, MTAD-GAT [149] employs a graph attention network as a spatiotemporal encoder to learn dependencies across variables and time, reconstructing the time series with a backbone reconstructor and identifying anomalies based on reconstruction errors. Similar techniques include VGCRN [150] and FuSAGNet [151]." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.731, + 0.921, + 0.76 + ], + "angle": 0, + "content": "C. Deep learning methods for Anomaly Detection based on Hybrid Method" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.764, + 0.922, + 0.947 + ], + "angle": 0, + "content": "In AD, reconstruction-based and prediction-based methods offer distinct but complementary approaches to identifying anomalies. Both methods rely on the discrepancy between the model's output and the actual input data as an indicator of abnormality. However, they diverge in how they handle data and their areas of application. Reconstruction-based methods focus on learning the underlying distribution of normal data. Once trained, the model attempts to recreate the input data. The reconstruction error, measured as the difference between the original data and its reconstruction, serves as a key indicator of anomalies. A high reconstruction error suggests that the data deviates from the normal patterns learned by the" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.078, + 0.03, + 0.427, + 0.041 + ], + "angle": 0, + "content": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. X, XXX 2025" + }, + { + "type": "page_number", + "bbox": [ + 0.908, + 0.031, + 0.921, + 0.041 + ], + "angle": 0, + "content": "17" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.069, + 0.493, + 0.766 + ], + "angle": 0, + "content": "model. This approach is particularly effective in cases where understanding the full structure or distribution of the data is crucial, such as in image-based AD or other high-dimensional datasets. In contrast, prediction-based methods focus on forecasting specific attributes or missing values from the data, rather than reconstructing the entire input. These methods typically predict future values or infer missing data points by leveraging known features. If the predicted values significantly deviate from the actual values, this signals a potential anomaly. Prediction-based methods are often more suited to feature-rich datasets, where predicting specific variables can help identify irregular patterns. For instance, in applications like fraud detection, predicting expected behaviors or transactions can reveal anomalies when the predicted outcomes differ from the observed ones. While both methods differ in their data processing approaches, they can be highly complementary. In many cases, combining reconstruction-based and prediction-based techniques within a hybrid framework allows for more robust AD. Reconstruction models capture the overall structure and patterns in the data, while prediction models focus on detecting deviations in specific variables or features. This combination can provide a more comprehensive solution for identifying anomalies in complex datasets across various domains. Tang et al. 
[152] utilize a U-Net module as the prediction module to perform future frame prediction, amplifying reconstruction errors for abnormal events, while another U-Net module is used as the reconstruction module to enhance predicted frames for normal events, thus improving the effectiveness of AD. Lv et al. [31] adopt a dilated convolution-based autoencoder to integrate prediction errors and reconstruction errors into the output anomaly scores, effectively improving the generalization capability of the detection model. Liu et al. [153] leverage a reconstruction model and a prediction model within an end-to-end semi-supervised AD framework to effectively capture inter-variable correlations and temporal dependencies in multivariate time series data from wind turbines. Additionally, by incorporating an auxiliary discriminator with adversarial training, the model can progressively improve performance using limited labeled data, enhancing the transition from unsupervised to supervised AD. Wei et al. [154] propose a hybrid deep-learning model combining LSTM and autoencoder for AD in indoor air quality data, where the LSTM captures long-term dependencies in time-series data and the autoencoder uses reconstruction loss to detect anomalies, effectively addressing both temporal correlations and reconstruction errors for improved detection accuracy." + }, + { + "type": "title", + "bbox": [ + 0.076, + 0.788, + 0.254, + 0.804 + ], + "angle": 0, + "content": "D. Summary and Insights" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.809, + 0.493, + 0.947 + ], + "angle": 0, + "content": "This section introduces three types of deep learning-based AD methods: reconstruction-based, prediction-based, and hybrid approaches. Reconstruction-based methods are particularly effective in handling high-dimensional and unsupervised data by learning intrinsic patterns and identifying deviations through reconstruction errors. Prediction-based methods excel at modeling temporal dependencies in time-series data, enabling the detection of unexpected patterns in dynamic environments. Hybrid approaches combine these strengths" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.069, + 0.923, + 0.147 + ], + "angle": 0, + "content": "to address complex scenarios where multiple anomaly types coexist. Notably, these methods demonstrate the power of deep learning in capturing intricate patterns and dependencies that traditional methods often miss, making them indispensable for tackling diverse and challenging AD tasks." + }, + { + "type": "title", + "bbox": [ + 0.536, + 0.162, + 0.892, + 0.192 + ], + "angle": 0, + "content": "V. INTEGRATE TRADITIONAL METHOD AND DEEP LEARNING METHOD" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.196, + 0.923, + 0.362 + ], + "angle": 0, + "content": "In the field of AD, traditional methods and deep learning approaches each offer unique advantages. Traditional methods, such as clustering [155] and Support Vector Data Description [156], are often simpler, more interpretable, and computationally efficient. These methods excel in providing transparent decision-making processes, making them suitable for applications where model interpretability is crucial. On the other hand, deep learning methods, with their ability to model complex, high-dimensional data distributions, offer enhanced detection accuracy and adaptability, especially for large datasets and unstructured data like images and sequences." 
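As a concrete illustration of this integration pattern, the sketch below applies a traditional detector (k-means distance-to-centroid) to features produced by an encoder. The encoder here is a stand-in random projection, and the cluster count and 99th-percentile threshold are illustrative assumptions; the hybrid systems surveyed in this section would use a trained deep network in its place.

```python
import numpy as np
from sklearn.cluster import KMeans

def encode(X, W):
    """Stand-in feature extractor: in the surveyed hybrids this would be a
    trained autoencoder or CNN; ReLU(X W) keeps the sketch self-contained."""
    return np.maximum(X @ W, 0.0)

rng = np.random.default_rng(0)
X_normal = rng.standard_normal((500, 20))                 # training data, assumed normal
X_test = np.vstack([rng.standard_normal((95, 20)),
                    rng.standard_normal((5, 20)) + 4.0])  # last 5 rows are shifted outliers

# Traditional detector on learned-style features: k-means in the encoded space.
W = rng.standard_normal((20, 8))
Z_normal = encode(X_normal, W)
km = KMeans(n_clusters=5, n_init=10, random_state=0).fit(Z_normal)

# Anomaly score = distance to the nearest centroid; the centroids remain
# inspectable, which preserves the interpretability of the clustering step.
score = km.transform(encode(X_test, W)).min(axis=1)
threshold = np.quantile(km.transform(Z_normal).min(axis=1), 0.99)
print(np.flatnonzero(score > threshold))                  # mostly indices 95..99
```

The division of labor mirrors the theme of this section: the deep component supplies a representation, while the traditional component supplies a transparent decision rule.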
+ }, + { + "type": "text", + "bbox": [ + 0.503, + 0.363, + 0.924, + 0.485 + ], + "angle": 0, + "content": "The integration of traditional and deep learning methods aims to leverage the interpretability and simplicity of traditional methods with the robustness and flexibility of deep learning techniques. By combining these approaches, researchers seek to create hybrid models that maintain accuracy while offering insights into the underlying decision-making process, improving both detection power and model transparency." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.504, + 0.658, + 0.518 + ], + "angle": 0, + "content": "A. Clustering method" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.522, + 0.924, + 0.947 + ], + "angle": 0, + "content": "Clustering models play a crucial role in unsupervised AD, particularly for textual data. These models group similar data points based on their proximity in feature space and identify anomalies as points that deviate from established clusters [157]. Common clustering techniques, such as k-means [158], Density-Based Spatial Clustering of Applications with Noise (DBSCAN) [159], and hierarchical clustering [160], work effectively for simpler datasets and offer the advantage of interpretability. By integrating clustering methods with deep learning, such as applying clustering post feature extraction by a neural network, it is possible to improve detection accuracy while maintaining an interpretable clustering structure. This hybrid approach is particularly useful in cases where data distribution varies, and flexible, context-aware AD is required. For instance, Li et al. [161] propose a method that extends fuzzy clustering with a reconstruction criterion and Particle Swarm Optimization (PSO) to detect anomalies in both amplitude and shape. This highlights how traditional clustering methods can benefit from optimization techniques to handle diverse anomaly types. Similarly, Markovitz et al. [162] introduce an innovative approach for AD in human actions by working directly on human pose graphs extracted from video sequences. By mapping these graphs to a latent space, clustering them, and applying a Dirichlet process-based mixture model, the method effectively leverages probabilistic modeling to enhance the robustness and flexibility of clustering for action recognition. In video AD, Qiu et al. [163] propose a convolution-enhanced self-attentive video auto-encoder" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.077, + 0.03, + 0.428, + 0.042 + ], + "angle": 0, + "content": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. X, XXX 2025" + }, + { + "type": "page_number", + "bbox": [ + 0.908, + 0.031, + 0.921, + 0.041 + ], + "angle": 0, + "content": "18" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.07, + 0.493, + 0.327 + ], + "angle": 0, + "content": "integrated with a dual-scale clustering module based on the K-means algorithm. This approach effectively distinguishes normal and abnormal video data by enhancing feature representations and addressing the fuzzy boundaries between them. Additionally, Peng et al. [33] introduce a multivariate ELM-MI framework combined with a dynamic kernel selection method. By employing hierarchical clustering on unlabeled data to determine kernels, this method enables unsupervised online detection of various anomaly types, including point and group anomalies, while reducing computational costs and improving robustness. 
These studies collectively highlight the potential of hybrid approaches that integrate clustering with advanced techniques like deep learning, probabilistic modeling, or optimization frameworks. Such methods leverage the interpretability and simplicity of traditional clustering while addressing its limitations in handling complex data, offering a promising pathway for accurate and flexible AD." + }, + { + "type": "title", + "bbox": [ + 0.076, + 0.339, + 0.231, + 0.354 + ], + "angle": 0, + "content": "B. Normalizing Flows" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.357, + 0.493, + 0.506 + ], + "angle": 0, + "content": "Normalizing Flows (NF) [164] offer a probabilistic framework for AD by estimating the probability distribution of data. Using a sequence of invertible transformations, NFs can model complex distributions, making them particularly effective for identifying anomalies as low-probability events. When integrated with deep learning models, such as CNNs or RNNs, NFs act as precise probabilistic estimators, complementing the feature extraction capabilities of deep networks. This hybrid framework enhances AD, particularly in high-dimensional or unstructured datasets." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.508, + 0.493, + 0.914 + ], + "angle": 0, + "content": "For instance, Yu et al. [165] propose FastFlow, a 2D normalizing flow module integrated with deep feature extractors like ResNet and Vision Transformers. By effectively modeling feature distributions and capturing both local and global relationships, FastFlow achieves state-of-the-art performance, with a \\(99.4\\%\\) AUC on the MVTec AD dataset, while maintaining high inference efficiency. Similarly, Cho et al. [166] introduce Implicit Two-path Autoencoder (ITAE), which reconstructs normal video patterns by implicitly modeling appearance and motion features through two encoders and a shared decoder. NF enhances ITAE by estimating the density of normal embeddings, enabling robust detection of out-of-distribution anomalies, with strong results across six surveillance benchmarks. For multivariate time series data, Zhou et al. [167] combine a graph structure learning model with entity-aware normalizing flows to capture interdependencies and evolving relations among entities. By estimating entity-specific densities and employing a clustering strategy for similar entities, the extended MTGFlow_cluster improves density estimation accuracy, demonstrating superior performance on six benchmark datasets. Further expanding on the use of graphs, Dai et al. [168] propose Graph-Augmented Normalizing Flow (GANF), which incorporates a Bayesian network to model causal relationships among time series. This approach factorizes joint probabilities into conditional probabilities, improving density estimation and enabling effective detection of anomalies in low-density regions, as well as identifying distribution drifts." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.915, + 0.492, + 0.947 + ], + "angle": 0, + "content": "These studies collectively highlight the strengths of integrating Normalizing Flows with traditional and deep learning-" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.069, + 0.923, + 0.177 + ], + "angle": 0, + "content": "based methods. 
By combining the interpretability and precision of probabilistic models with the expressive power of deep networks or graph structures, these hybrid approaches address the challenges of complex data distributions, offering scalable and robust solutions for diverse AD tasks. This synergy underscores the potential of such methods to push the boundaries of accuracy and adaptability in real-world applications." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.195, + 0.755, + 0.21 + ], + "angle": 0, + "content": "C. Support Vector Data Description" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.213, + 0.923, + 0.439 + ], + "angle": 0, + "content": "Support Vector Data Description (SVDD) [156] is a traditional machine learning method used to define a boundary around normal data points, effectively distinguishing them from anomalies. Unlike binary classification, SVDD is particularly effective for one-class classification tasks, where only normal data is available. This approach is computationally efficient and interpretable, as it provides a clear boundary between normal and abnormal points. By integrating SVDD with deep learning, researchers can enhance the boundary definition based on high-dimensional features extracted by a neural network, resulting in a model that combines the boundary precision of SVDD with the feature richness of deep learning. This hybrid model is highly effective in scenarios where boundary clarity and interpretability are paramount, such as in industrial monitoring or fraud detection." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.44, + 0.923, + 0.725 + ], + "angle": 0, + "content": "To improve latent representations, Zhou et al. [169] propose Deep SVDD-VAE, which jointly optimizes VAE and SVDD. The VAE reconstructs input data, and SVDD simultaneously defines a spherical boundary in the latent space, ensuring separability of normal and anomalous instances. This joint optimization significantly outperforms traditional AE-based methods, as shown on MNIST, CIFAR-10, and GTSRB datasets. For variable-length time series data, Ergen et al. [124] introduce an LSTM-based AD framework, where LSTM and SVDD are jointly optimized using modified objectives. This method extends seamlessly to GRU architectures, demonstrating strong performance across unsupervised, semisupervised, and supervised settings. Besides, Zhang et al. [170] propose Deep Structure Preservation SVDD (DSPSVDD), which simultaneously minimizes hypersphere volume and network reconstruction error. This dual objective ensures deep feature preservation and enhances AD performance, outperforming traditional SVDD models on datasets like MNIST and MVTec AD." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.726, + 0.923, + 0.817 + ], + "angle": 0, + "content": "These studies highlight the strengths of combining SVDD with deep learning, where deep models enhance feature representation while SVDD ensures boundary precision. This hybrid framework effectively addresses limitations in both methods, offering a scalable and interpretable solution for complex AD tasks across diverse domains." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.836, + 0.685, + 0.851 + ], + "angle": 0, + "content": "D. Summary and Insights" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.855, + 0.922, + 0.947 + ], + "angle": 0, + "content": "This section explores the integration of traditional and deep learning methods for AD, highlighting how their complementary strengths can be combined. 
Traditional methods, known for their simplicity, interpretability, and computational efficiency, excel in scenarios where transparency is critical. In contrast, deep learning methods offer superior adaptability" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.077, + 0.03, + 0.428, + 0.041 + ], + "angle": 0, + "content": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. X, XXX 2025" + }, + { + "type": "page_number", + "bbox": [ + 0.908, + 0.031, + 0.921, + 0.041 + ], + "angle": 0, + "content": "19" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.07, + 0.493, + 0.176 + ], + "angle": 0, + "content": "and accuracy, particularly for high-dimensional and unstructured data. By integrating these approaches, hybrid models can leverage the interpretability of traditional methods while retaining the robustness and flexibility of deep learning. This fusion not only enhances AD performance but also bridges the gap between accuracy and model transparency, making it a promising direction for future research." + }, + { + "type": "title", + "bbox": [ + 0.145, + 0.196, + 0.422, + 0.21 + ], + "angle": 0, + "content": "VI. OPEN ISSUES AND FUTURE WORKS" + }, + { + "type": "title", + "bbox": [ + 0.075, + 0.217, + 0.21, + 0.23 + ], + "angle": 0, + "content": "A. Data Collection" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.237, + 0.493, + 0.373 + ], + "angle": 0, + "content": "Data scarcity and class imbalance remain major challenges in AD. Since anomalies are rare, obtaining large labeled datasets is costly and time-consuming, especially when expert annotation is required. Supervised learning struggles due to the lack of abnormal samples, while the overwhelming presence of normal data biases models toward common patterns. This problem is particularly critical in cybersecurity, healthcare, and industrial monitoring, where undetected anomalies can have serious consequences." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.374, + 0.495, + 0.691 + ], + "angle": 0, + "content": "Several approaches mitigate these issues. Semi-supervised and unsupervised learning exploit normal data distributions to detect deviations without requiring labeled anomalies [171] [172]. Data augmentation, synthetic data generation, and oversampling improve data balance by increasing the number of anomalous examples, helping models generalize better [173] [174]. Despite these advancements, challenges remain. Semi-supervised methods struggle with subtle anomalies that closely resemble normal data. Augmentation techniques, often based on simple transformations, may fail to capture complex domain-specific variations. Similarly, synthetic data generation may not fully reflect real-world anomaly diversity, leading to models biased toward normal samples. Moreover, even with augmentation, models risk overfitting to the majority class, compromising anomaly detection performance. Ensuring that models remain sensitive to rare anomalies while maintaining accuracy on normal data remains an ongoing challenge. Future research may focus on refining self-supervised learning [175], improving the diversity of synthetic samples [176], and developing more adaptive anomaly detection frameworks to enhance robustness in real-world applications." + }, + { + "type": "title", + "bbox": [ + 0.075, + 0.714, + 0.283, + 0.729 + ], + "angle": 0, + "content": "B. 
Computational Complexity" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.733, + 0.493, + 0.914 + ], + "angle": 0, + "content": "In AD, computational complexity is a crucial factor, especially for systems operating in real-time environments or handling large-scale datasets. The efficiency of an algorithm directly impacts its feasibility in fields like industrial monitoring, cybersecurity, and autonomous systems, where swift detection is essential. Many advanced models, particularly deep learning approaches like autoencoders, GANs, and LSTMs, are computationally intensive due to their complex architectures and iterative learning processes. This often leads to trade-offs between detection accuracy and computational efficiency, with continuous efforts aimed at optimizing models to reduce computational demands without sacrificing performance." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.915, + 0.493, + 0.947 + ], + "angle": 0, + "content": "Moreover, AD models frequently require substantial memory resources, especially when dealing with high-dimensional" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.07, + 0.923, + 0.266 + ], + "angle": 0, + "content": "or streaming data, making memory usage a crucial consideration. Techniques like memory-efficient architectures, data compression, and sparse modeling are commonly used to address this issue. Real-time AD adds further complexity, as algorithms must process incoming data and make rapid decisions in applications like autonomous driving and fraud detection [177], where even minimal delays can have severe consequences. Achieving real-time performance typically involves optimizing data processing speeds and decision-making through lightweight models [178] [179] and parallel processing techniques, such as GPU acceleration [180]. However, balancing real-time detection capabilities with high accuracy remains challenging." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.266, + 0.923, + 0.417 + ], + "angle": 0, + "content": "The tension between computational complexity and detection accuracy persists, as complex models often excel in detection but lack practical applicability for real-time or large-scale scenarios. Simpler models, though computationally efficient, may fail to detect nuanced anomalies. Hybrid models or multi-stage frameworks that deploy complex methods only as needed provide a potential solution. Additionally, future research may benefit from exploring distributed computing solutions, like cloud [181] or edge computing, to enhance real-time AD performance in resource-limited environments." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.43, + 0.763, + 0.445 + ], + "angle": 0, + "content": "C. Explainability and Interpretability" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.448, + 0.923, + 0.614 + ], + "angle": 0, + "content": "Deep learning methods have greatly advanced AD by capturing complex patterns in high-dimensional data. However, they are often criticized as \"black-box\" models due to their lack of transparency, making it challenging to understand why certain data points are flagged as anomalies. For fields like healthcare, finance, or industrial monitoring, accurate detection alone is insufficient; stakeholders also need clear explanations to understand why a particular anomaly was detected. This lack of interpretability limits the practical deployment of deep learning models, as the inability to justify decisions reduces trust and hinders adoption in critical applications." 
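One lightweight way to attach such an explanation to a reconstruction-based detector, sketched below under our own assumptions (a generic reconstruction vector and squared-error scoring, an error decomposition rather than LIME or SHAP), is to report each feature's share of the reconstruction error so that a stakeholder can see which variables drove an alarm.

```python
import numpy as np

def explain_alarm(x, x_hat, feature_names, top_k=3):
    """Decompose a reconstruction-based anomaly score into per-feature shares.

    The score is the squared reconstruction error; reporting each feature's
    share of that error is a simple, model-agnostic explanation for
    reconstruction-based detectors.
    """
    per_feature = (x - x_hat) ** 2
    share = per_feature / per_feature.sum()
    order = np.argsort(share)[::-1][:top_k]
    return [(feature_names[i], round(float(share[i]), 3)) for i in order]

# Illustrative usage with a hypothetical sensor vector and its reconstruction:
names = ["temp", "pressure", "vibration", "current"]
x     = np.array([21.0, 1.00, 5.0, 3.1])
x_hat = np.array([20.8, 1.01, 1.2, 3.0])   # the model failed to reconstruct 'vibration'
print(explain_alarm(x, x_hat, names))
# -> [('vibration', 0.997), ('temp', 0.003), ('current', 0.001)]
```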
+ }, + { + "type": "text", + "bbox": [ + 0.504, + 0.614, + 0.923, + 0.809 + ], + "angle": 0, + "content": "In fields like healthcare, where anomalies may be linked to medical diagnoses, or in finance, where fraud detection can carry legal implications, interpretability is essential. Transparent model decisions enable experts to validate results and make informed decisions. In safety-critical applications, such as autonomous driving or industrial equipment monitoring, understanding the rationale behind AD is vital for ensuring safety. One major challenge is balancing the trade-off between model interpretability and performance. Simpler models, like decision trees or linear regression, offer greater transparency but often lack the complexity needed to detect subtle anomalies in high-dimensional data. In contrast, deep learning models provide high accuracy but are harder to interpret." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.81, + 0.923, + 0.947 + ], + "angle": 0, + "content": "Ongoing research is exploring hybrid approaches, where interpretable models are combined with more complex ones, allowing for accurate AD with the added benefit of interpretability. For example, attention mechanisms [182] in neural networks can help highlight specific data regions influencing decisions, providing insights into the model's internal workings. Alternatively, tools like Local Interpretable Model-agnostic Explanations (LIME) and SHapley Additive exPlanations (SHAP) [2] can offer post-hoc explanations, improving" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.077, + 0.03, + 0.428, + 0.042 + ], + "angle": 0, + "content": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. X, XXX 2025" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.031, + 0.921, + 0.041 + ], + "angle": 0, + "content": "20" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.07, + 0.493, + 0.131 + ], + "angle": 0, + "content": "transparency without altering model structure. Future research could also focus on real-time explainability in time-sensitive applications, and incorporating domain knowledge or user feedback to enhance model interpretability." + }, + { + "type": "title", + "bbox": [ + 0.075, + 0.148, + 0.36, + 0.163 + ], + "angle": 0, + "content": "D. Handling Diverse Types of Anomalies" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.167, + 0.493, + 0.348 + ], + "angle": 0, + "content": "In real-world AD, multiple types of anomalies often coexist, adding complexity to the detection process. Beyond point anomalies, which are the simplest, other types like contextual and collective anomalies are common, especially in dynamic environments. For instance, in intelligent transportation systems, anomalies may include both isolated incidents (e.g., a single vehicle's sudden deceleration) and collective patterns (e.g., multiple vehicles simultaneously slowing down), each requiring different detection methods. Effectively capturing these varied anomaly types requires flexible models capable of adapting to different anomaly patterns without focusing on only one type." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.348, + 0.495, + 0.545 + ], + "angle": 0, + "content": "Continuous research is needed to develop models that can generalize across anomaly types, enhancing adaptability and balancing detection accuracy with model flexibility. Hybrid approaches, for instance, can integrate different methods to capture diverse anomalies more effectively. 
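As a concrete illustration of the post-hoc explanation route described above, the sketch below applies SHAP's model-agnostic KernelExplainer to the anomaly score of a scikit-learn IsolationForest; it assumes the shap and scikit-learn packages are available, and the detector choice, synthetic data, and feature values are illustrative assumptions rather than a setup taken from the surveyed papers.

```python
import numpy as np
import shap
from sklearn.ensemble import IsolationForest

rng = np.random.default_rng(0)
X_train = rng.normal(0.0, 1.0, size=(500, 4))   # mostly "normal" behaviour
x_anom = np.array([[0.1, 6.0, -0.2, 0.3]])      # feature 1 is far outside the norm

detector = IsolationForest(random_state=0).fit(X_train)

# KernelExplainer is model-agnostic: it only needs the scoring function and a
# small background sample, so it applies to detectors whose internals are opaque.
background = shap.sample(X_train, 50)
explainer = shap.KernelExplainer(detector.decision_function, background)
shap_values = explainer.shap_values(x_anom)

# IsolationForest's decision_function returns lower scores for more anomalous
# points, so the most negative attribution marks the feature driving the flag.
print(np.argmin(shap_values[0]))  # expected -> 1
```

KernelExplainer trades speed for generality, which mirrors the accuracy-versus-cost tension noted in Section VI-B; faster model-specific explainers exist for some detector families.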
The challenge remains in achieving this versatility without sacrificing accuracy, as models must maintain strong performance across different contexts. Future work may also explore multi-modal models [183] that combine different types of data, further improving detection capabilities by drawing from diverse data sources. These directions aim to create AD systems that are both robust and adaptable, capable of handling the complex and mixed nature of real-world anomaly scenarios." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.559, + 0.353, + 0.572 + ], + "angle": 0, + "content": "VII. CONCLUSION" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.578, + 0.495, + 0.866 + ], + "angle": 0, + "content": "In this survey, we have provided a comprehensive overview of the recent advancements in AD with a primary focus on deep learning techniques from 2019 to 2024. By analyzing over 180 research papers from leading journals and conferences, we have explored how AD methods have evolved to address diverse challenges across various types of data. This survey categorizes and examines deep learning methods into reconstruction-based, prediction-based, and hybrid approaches, highlighting their strengths, limitations, and applications. Recognizing the simplicity, interpretability, and computational efficiency of traditional AD methods, we reviewed their integration with deep learning techniques. These hybrid approaches aim to leverage the strengths of both paradigms, enhancing robustness and efficiency in AD systems. This survey not only sheds light on the state-of-the-art techniques but also identifies gaps and opportunities for future research. By focusing on the latest trends and innovations, this work aims to inspire further exploration and advancements in the rapidly evolving field of AD." + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.879, + 0.333, + 0.892 + ], + "angle": 0, + "content": "REFERENCES" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.898, + 0.493, + 0.946 + ], + "angle": 0, + "content": "[1] L. Ruff, J. R. Kauffmann, R. A. Vandermeulen, G. Montavon, W. Samek, M. Kloft, T. G. Dietterich, and K.-R. Müller, “A unifying review of deep and shallow anomaly detection,” Proc. IEEE, vol. 109, no. 5, pp. 756–795, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.52, + 0.071, + 0.922, + 0.117 + ], + "angle": 0, + "content": "[2] V. Vimbi, N. Shaffi, and M. Mahmud, \"Interpreting artificial intelligence models: a systematic review on the application of lime and shap in alzheimer's disease detection,\" Brain Informatics, vol. 11, no. 1, p. 10, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.52, + 0.118, + 0.922, + 0.152 + ], + "angle": 0, + "content": "[3] F. Al-Turjman, H. Zahmatkesh, and R. Shahroze, “An overview of security and privacy in smart cities’ IoT communications,” Trans. Emerg. Telecommun. Technol., vol. 33, no. 3, p. e3677, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.521, + 0.153, + 0.921, + 0.197 + ], + "angle": 0, + "content": "[4] Y. A. Qadri, A. Nauman, Y. B. Zikria, A. V. Vasilakos, and S. W. Kim, \"The future of healthcare internet of things: a survey of emerging technologies,\" IEEE Commun. Surv. Tutor., vol. 22, no. 2, pp. 1121-1167, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.521, + 0.198, + 0.921, + 0.233 + ], + "angle": 0, + "content": "[5] M. Humayun, N. Jhanjhi, B. Hamid, and G. Ahmed, “Emerging smart logistics and transportation using IoT and blockchain,” IEEE Internet Things Mag., vol. 3, no. 2, pp. 58–62, 2020."
+ }, + { + "type": "ref_text", + "bbox": [ + 0.521, + 0.234, + 0.92, + 0.267 + ], + "angle": 0, + "content": "[6] S. H. Haji and S. Y. Ameen, \"Attack and anomaly detection in IoT networks using machine learning techniques: A review,\" Asian J. Res. Comput. Sci., vol. 9, no. 2, pp. 30-46, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.521, + 0.268, + 0.921, + 0.312 + ], + "angle": 0, + "content": "[7] V. Mothukuri, P. Khare, R. M. Parizi, S. Pouriyeh, A. Dehghantanha, and G. Srivastava, \"Federated-learning-based anomaly detection for IoT security attacks,\" IEEE Internet Things J., vol. 9, no. 4, pp. 2545-2554, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.521, + 0.313, + 0.921, + 0.357 + ], + "angle": 0, + "content": "[8] S. A. Al Mamun and J. Valimaki, “Anomaly detection and classification in cellular networks using automatic labeling technique for applying supervised learning,” Procedia Comput. Sci., vol. 140, pp. 186-195, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.521, + 0.359, + 0.92, + 0.415 + ], + "angle": 0, + "content": "[9] M. E. Villa-Pérez, M. A. Alvarez-Carmona, O. Loyola-Gonzalez, M. A. Medina-Pérez, J. C. Velazco-Rossell, and K.-K. R. Choo, \"Semi-supervised anomaly detection algorithms: A comparative summary and future research directions,\" Knowledge-Based Systems, vol. 218, p. 106878, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.416, + 0.92, + 0.451 + ], + "angle": 0, + "content": "[10] G. Michau and O. Fink, \"Unsupervised transfer learning for anomaly detection: Application to complementary operating condition transfer,\" Knowledge-Based Systems, vol. 216, p. 106816, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.452, + 0.92, + 0.485 + ], + "angle": 0, + "content": "[11] Y. Liang, J. Zhang, S. Zhao, R. Wu, Y. Liu, and S. Pan, \"Omni-frequency channel-selection representations for unsupervised anomaly detection,\" IEEE Trans. Image Process., 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.486, + 0.921, + 0.518 + ], + "angle": 0, + "content": "[12] B. Siegel, \"Industrial anomaly detection: A comparison of unsupervised neural network architectures,\" IEEE Sens. Lett., vol. 4, no. 8, pp. 1-4, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.519, + 0.92, + 0.564 + ], + "angle": 0, + "content": "[13] P. Bergmann, M. Fauser, D. Sattlegger, and C. Steger, \"Mvtec ad-a comprehensive real-world dataset for unsupervised anomaly detection,\" in Proc. IEEE/CVF Conf. Comput. Vis. Pattern Recognit., 2019, pp. 9592-9600." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.565, + 0.921, + 0.599 + ], + "angle": 0, + "content": "[14] S. Schmidl, P. Wenig, and T. Papenbrock, \"Anomaly detection in time series: a comprehensive evaluation,\" Proc. VLDB Endow., vol. 15, no. 9, pp. 1779-1797, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.599, + 0.92, + 0.634 + ], + "angle": 0, + "content": "[15] S. Zhai, Y. Cheng, W. Lu, and Z. Zhang, \"Deep structured energy based models for anomaly detection,\" in Int. Conf. Mach. Learn. (ICML). PMLR, 2016, pp. 1100-1109." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.635, + 0.92, + 0.68 + ], + "angle": 0, + "content": "[16] H. Sarmadi and A. Karamodin, “A novel anomaly detection method based on adaptive mahalanobis-squared distance and one-class knn rule for structural health monitoring under environmental effects,” Mech. Syst. Signal Process., vol. 140, p. 106495, 2020."
+ }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.68, + 0.92, + 0.714 + ], + "angle": 0, + "content": "[17] I. Syarif, A. Prugel-Bennett, and G. Wills, “Unsupervised clustering approach for network anomaly detection,” in Netw. Digit. Technol., Int. Conf., NDT 2012, Proc., Part I. Springer, 2012, pp. 135–145." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.715, + 0.921, + 0.748 + ], + "angle": 0, + "content": "[18] D. Samariya and A. Thakkar, “A comprehensive survey of anomaly detection algorithms,” Ann. Data Sci., vol. 10, no. 3, pp. 829–850, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.749, + 0.92, + 0.782 + ], + "angle": 0, + "content": "[19] G. Pang, C. Shen, L. Cao, and A. V. D. Hengel, “Deep learning for anomaly detection: A review,” ACM Comput. Surv., vol. 54, no. 2, pp. 1-38, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.784, + 0.92, + 0.806 + ], + "angle": 0, + "content": "[20] L. Bergman, N. Cohen, and Y. Hoshen, \"Deep nearest neighbor anomaly detection,\" arXiv preprint arXiv:2002.10445, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.807, + 0.92, + 0.841 + ], + "angle": 0, + "content": "[21] K. Leung and C. Leckie, \"Unsupervised anomaly detection in network intrusion detection using clusters,\" in Proc. 28th Australas. Conf. Comput. Sci., vol. 38, 2005, pp. 333-342." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.842, + 0.92, + 0.875 + ], + "angle": 0, + "content": "[22] H. Ringberg, A. Soule, J. Rexford, and C. Diot, \"Sensitivity of pca for traffic anomaly detection,\" in Proc. 2007 ACM SIGMETRICS Int. Conf. Meas. Model. Comput. Syst., 2007, pp. 109-120." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.876, + 0.921, + 0.91 + ], + "angle": 0, + "content": "[23] D. Kwon, H. Kim, J. Kim, S. C. Suh, I. Kim, and K. J. Kim, “A survey of deep learning-based network anomaly detection,” Cluster Computing, vol. 22, pp. 949–961, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.91, + 0.921, + 0.945 + ], + "angle": 0, + "content": "[24] A. Aldweesh, A. Derhab, and A. Z. Emam, \"Deep learning approaches for anomaly-based intrusion detection systems: A survey, taxonomy, and open issues,\" Knowl.-Based Syst., vol. 189, p. 105124, 2020." + }, + { + "type": "list", + "bbox": [ + 0.514, + 0.071, + 0.922, + 0.945 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.078, + 0.03, + 0.427, + 0.042 + ], + "angle": 0, + "content": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. X, XXX 2025" + }, + { + "type": "page_number", + "bbox": [ + 0.906, + 0.032, + 0.92, + 0.041 + ], + "angle": 0, + "content": "21" + }, + { + "type": "ref_text", + "bbox": [ + 0.083, + 0.072, + 0.492, + 0.106 + ], + "angle": 0, + "content": "[25] L. Li, J. Yan, H. Wang, and Y. Jin, \"Anomaly detection of time series with smoothness-inducing sequential variational auto-encoder,\" IEEE Trans. Neural Netw. Learn. Syst., vol. 32, no. 3, pp. 1177-1191, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.083, + 0.107, + 0.492, + 0.142 + ], + "angle": 0, + "content": "[26] G. Harshvardhan, M. K. Gourisaria, M. Pandey, and S. S. Rautaray, \"A comprehensive survey and analysis of generative models in machine learning,\" Comput. Sci. Rev., vol. 38, p. 100285, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.143, + 0.492, + 0.166 + ], + "angle": 0, + "content": "[27] B. Nachman and D. Shih, \"Anomaly detection with density estimation,\" Phys. Rev. D, vol. 101, no. 7, p. 075042, 2020." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.166, + 0.492, + 0.2 + ], + "angle": 0, + "content": "[28] A. B. Nassif, M. A. Talib, Q. Nasir, and F. M. Dakalbab, \"Machine learning for anomaly detection: A systematic review,\" IEEE Access, vol. 9, pp. 78658-78700, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.201, + 0.492, + 0.246 + ], + "angle": 0, + "content": "[29] X. Ma, J. Wu, S. Xue, J. Yang, C. Zhou, Q. Z. Sheng, H. Xiong, and L. Akoglu, “A comprehensive survey on graph anomaly detection with deep learning,” IEEE Trans. Knowl. Data Eng., vol. 35, no. 12, pp. 12012–12038, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.246, + 0.492, + 0.28 + ], + "angle": 0, + "content": "[30] X. Xia, X. Pan, N. Li, X. He, L. Ma, X. Zhang, and N. Ding, “Gan-based anomaly detection: A review,” Neurocomputing, vol. 493, pp. 497-535, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.281, + 0.492, + 0.316 + ], + "angle": 0, + "content": "[31] J. Lv, Y. Wang, and S. Chen, \"Adaptive multivariate time-series anomaly detection,\" Inf. Process. Manag., vol. 60, no. 4, p. 103383, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.316, + 0.492, + 0.352 + ], + "angle": 0, + "content": "[32] M. Y. I. Basheer, A. M. Ali, N. H. A. Hamid, M. A. M. Ariffin, R. Osman, S. Nordin, and X. Gu, \"Autonomous anomaly detection for streaming data,\" Knowledge-Based Systems, vol. 284, p. 111235, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.352, + 0.492, + 0.397 + ], + "angle": 0, + "content": "[33] X. Peng, H. Li, F. Yuan, S. G. Razul, Z. Chen, and Z. Lin, \"An extreme learning machine for unsupervised online anomaly detection in multivariate time series,\" Neurocomputing, vol. 501, pp. 596-608, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.398, + 0.492, + 0.444 + ], + "angle": 0, + "content": "[34] Y. Choi, H. Lim, H. Choi, and I.-J. Kim, \"Gan-based anomaly detection and localization of multivariate time series data for power plant,\" in Proc. 2020 IEEE Int. Conf. Big Data Smart Comput. (BigComp). IEEE, 2020, pp. 71-74." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.444, + 0.492, + 0.478 + ], + "angle": 0, + "content": "[35] H.-T. Duong, V.-T. Le, and V. T. Hoang, \"Deep learning-based anomaly detection in video surveillance: a survey,\" Sensors, vol. 23, no. 11, p. 5024, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.479, + 0.492, + 0.514 + ], + "angle": 0, + "content": "[36] S. Thudumu, P. Branch, J. Jin, and J. Singh, \"A comprehensive survey of anomaly detection techniques for high dimensional big data,\" Journal of Big Data, vol. 7, pp. 1-30, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.514, + 0.492, + 0.548 + ], + "angle": 0, + "content": "[37] I. Souiden, M. N. Omri, and Z. Brahmi, “A survey of outlier detection in high dimensional data streams,” Comput. Sci. Rev., vol. 44, p. 100463, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.549, + 0.492, + 0.584 + ], + "angle": 0, + "content": "[38] Q. Ding and E. D. Kolaczyk, “A compressed pca subspace method for anomaly detection in high-dimensional data,” IEEE Trans. Inf. Theory, vol. 59, no. 11, pp. 7419–7433, 2013." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.584, + 0.492, + 0.619 + ], + "angle": 0, + "content": "[39] M. Sakurada and T. Yairi, \"Anomaly detection using autoencoders with nonlinear dimensionality reduction,\" in Proc. MLSDA 2014 2nd Workshop Mach. Learn. Sensory Data Anal., 2014, pp. 
4-11." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.619, + 0.492, + 0.664 + ], + "angle": 0, + "content": "[40] T. Cheng and B. Wang, \"Total variation and sparsity regularized decomposition model with union dictionary for hyperspectral anomaly detection,\" IEEE Trans. Geosci. Remote Sens., vol. 59, no. 2, pp. 1472-1486, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.665, + 0.492, + 0.7 + ], + "angle": 0, + "content": "[41] L. Li, W. Li, Q. Du, and R. Tao, \"Low-rank and sparse decomposition with mixture of gaussian for hyperspectral anomaly detection,\" IEEE Trans. Cybern., vol. 51, no. 9, pp. 4363-4372, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.7, + 0.492, + 0.735 + ], + "angle": 0, + "content": "[42] S. Han and S. S. Woo, “Learning sparse latent graph representations for anomaly detection in multivariate time series,” in Proc. 28th ACM SIGKDD Conf. Knowl. Discov. Data Min., 2022, pp. 2977–2986." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.735, + 0.492, + 0.769 + ], + "angle": 0, + "content": "[43] X. Ma and W. Shi, “Aesmote: Adversarial reinforcement learning with smote for anomaly detection,” IEEE Trans. Netw. Sci. Eng., vol. 8, no. 2, pp. 943–956, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.77, + 0.492, + 0.804 + ], + "angle": 0, + "content": "[44] M. Kim, E. Ou, P.-L. Loh, T. Allen, R. Agasie, and K. Liu, \"Rnn-based online anomaly detection in nuclear reactors for highly imbalanced datasets with uncertainty,\" Nucl. Eng. Des., vol. 364, p. 110699, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.805, + 0.492, + 0.84 + ], + "angle": 0, + "content": "[45] G. Dlamini and M. Fahim, “Dgm: a data generative model to improve minority class presence in anomaly detection domain,” Neural Comput. Appl., vol. 33, pp. 13635–13646, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.84, + 0.492, + 0.874 + ], + "angle": 0, + "content": "[46] S. Han, X. Hu, H. Huang, M. Jiang, and Y. Zhao, \"Adbench: Anomaly detection benchmark,\" Adv. Neural Inf. Process. Syst., vol. 35, pp. 32142-32159, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.874, + 0.492, + 0.91 + ], + "angle": 0, + "content": "[47] Y. Zhang, Y. Chen, J. Wang, and Z. Pan, \"Unsupervised deep anomaly detection for multi-sensor time-series signals,\" IEEE Trans. Knowl. Data Eng., vol. 35, no. 2, pp. 2118-2132, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.91, + 0.492, + 0.944 + ], + "angle": 0, + "content": "[48] D. Chen, L. Yue, X. Chang, M. Xu, and T. Jia, \"Nm-gan: Noise-modulated generative adversarial network for video anomaly detection,\" Pattern Recognition, vol. 116, p. 107969, 2021." + }, + { + "type": "list", + "bbox": [ + 0.083, + 0.072, + 0.492, + 0.944 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.515, + 0.071, + 0.92, + 0.106 + ], + "angle": 0, + "content": "[49] M. U. Hassan, M. H. Rehmani, and J. Chen, \"Anomaly detection in blockchain networks: A comprehensive survey,\" IEEE Commun. Surv. Tutor., vol. 25, no. 1, pp. 289-318, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.515, + 0.107, + 0.92, + 0.152 + ], + "angle": 0, + "content": "[50] Y. Liu, S. Garg, J. Nie, Y. Zhang, Z. Xiong, J. Kang, and M. S. Hossain, \"Deep anomaly detection for time-series data in industrial IoT: A communication-efficient on-device federated learning approach,\" IEEE Internet Things J., vol. 8, no. 8, pp. 6348-6358, 2020."
+ }, + { + "type": "ref_text", + "bbox": [ + 0.515, + 0.152, + 0.92, + 0.198 + ], + "angle": 0, + "content": "[51] M. J. Idrissi, H. Alami, A. El Mahdaouy, A. El Mekki, S. Oualil, Z. Yartaoui, and I. Berrada, “Fed-anids: Federated learning for anomaly-based network intrusion detection systems,” Expert Syst. Appl., vol. 234, p. 121000, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.515, + 0.198, + 0.92, + 0.244 + ], + "angle": 0, + "content": "[52] L. Cui, Y. Qu, G. Xie, D. Zeng, R. Li, S. Shen, and S. Yu, \"Security and privacy-enhanced federated learning for anomaly detection in IoT infrastructures,\" IEEE Trans. Ind. Inform., vol. 18, no. 5, pp. 3492-3500, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.515, + 0.244, + 0.92, + 0.289 + ], + "angle": 0, + "content": "[53] X. Wang, J. Liu, T. Qiu, C. Mu, C. Chen, and P. Zhou, \"A real-time collision prediction mechanism with deep learning for intelligent transportation system,\" IEEE Trans. Veh. Technol., vol. 69, no. 9, pp. 9497-9508, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.515, + 0.289, + 0.92, + 0.325 + ], + "angle": 0, + "content": "[54] G. Li, T.-H. Nguyen, and J. J. Jung, \"Traffic incident detection based on dynamic graph embedding in vehicular edge computing,\" Appl. Sci., vol. 11, no. 13, p. 5861, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.515, + 0.325, + 0.92, + 0.359 + ], + "angle": 0, + "content": "[55] G. Li and J. J. Jung, \"Deep learning for anomaly detection in multivariate time series: Approaches, applications, and challenges,\" Inf. Fusion, vol. 91, pp. 93-102, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.515, + 0.359, + 0.92, + 0.393 + ], + "angle": 0, + "content": "[56] C. Zhao, X. Chang, T. Xie, H. Fujita, and J. Wu, \"Unsupervised anomaly detection based method of risk evaluation for road traffic accident,\" Appl. Intell., vol. 53, no. 1, pp. 369-384, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.515, + 0.393, + 0.92, + 0.428 + ], + "angle": 0, + "content": "[57] S. Li, A. Pandey, B. Hooi, C. Faloutsos, and L. Pileggi, \"Dynamic graph-based anomaly detection in the electrical grid,\" IEEE Trans. Power Syst., vol. 37, no. 5, pp. 3408-3422, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.515, + 0.429, + 0.92, + 0.462 + ], + "angle": 0, + "content": "[58] X. Wang and S.-H. Ahn, “Real-time prediction and anomaly detection of electrical load in a residential community,” Appl. Energy, vol. 259, p. 114145, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.515, + 0.462, + 0.92, + 0.508 + ], + "angle": 0, + "content": "[59] I. Siniosoglou, P. Radoglou-Grammatikis, G. Efstathopoulos, P. Fouliras, and P. Sarigiannidis, “A unified deep learning anomaly detection and classification approach for smart grid environments,” IEEE Trans. Netw. Serv. Manag., vol. 18, no. 2, pp. 1137-1151, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.515, + 0.508, + 0.92, + 0.543 + ], + "angle": 0, + "content": "[60] T. Fernando, H. Gammulle, S. Denman, S. Sridharan, and C. Fookes, \"Deep learning for medical anomaly detection-a survey,\" ACM Comput. Surv., vol. 54, no. 7, pp. 1-37, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.515, + 0.543, + 0.92, + 0.577 + ], + "angle": 0, + "content": "[61] E. Šabić, D. Keeley, B. Henderson, and S. Nannemann, “Healthcare and anomaly detection: using machine learning to predict anomalies in heart rate data,” AI & Society, vol. 36, no. 1, pp. 149–158, 2021."
+ }, + { + "type": "ref_text", + "bbox": [ + 0.515, + 0.577, + 0.92, + 0.611 + ], + "angle": 0, + "content": "[62] K. G. Al-Hashedi and P. Magalingam, “Financial fraud detection applying data mining techniques: A comprehensive review from 2009 to 2019,” Comput. Sci. Rev., vol. 40, p. 100402, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.515, + 0.611, + 0.92, + 0.646 + ], + "angle": 0, + "content": "[63] W. Hilal, S. A. Gadsden, and J. Yawney, \"Financial fraud: a review of anomaly detection techniques and recent advances,\" Expert Syst. Appl., vol. 193, p. 116429, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.515, + 0.646, + 0.92, + 0.68 + ], + "angle": 0, + "content": "[64] H. Fujita, A. Gaeta, V. Loia, and F. Orciuoli, “Resilience analysis of critical infrastructures: A cognitive approach based on granular computing,” IEEE Trans. Cybern., vol. 49, no. 5, pp. 1835–1848, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.515, + 0.68, + 0.92, + 0.715 + ], + "angle": 0, + "content": "[65] V. K. Singh and M. Govindarasu, “A cyber-physical anomaly detection for wide-area protection using machine learning,” IEEE Trans. Smart Grid, vol. 12, no. 4, pp. 3514–3526, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.515, + 0.715, + 0.92, + 0.759 + ], + "angle": 0, + "content": "[66] S. M. Nagarajan, G. G. Deverajan, A. K. Bashir, R. P. Mahapatra, and M. S. Al-Numay, \"TADF-cps: Intelligent anomaly detection framework towards cyber physical systems,\" Comput. Commun., vol. 188, pp. 81–89, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.515, + 0.76, + 0.92, + 0.806 + ], + "angle": 0, + "content": "[67] T. Nakao, S. Hanaoka, Y. Nomura, M. Murata, T. Takenaga, S. Miki, T. Watadani, T. Yoshikawa, N. Hayashi, and O. Abe, \"Unsupervised deep anomaly detection in chest radiographs,\" J. Digit. Imaging, vol. 34, pp. 418-427, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.515, + 0.806, + 0.92, + 0.852 + ], + "angle": 0, + "content": "[68] W. H. Pinaya, P.-D. Tudosiu, R. Gray, G. Rees, P. Nachev, S. Ourselin, and M. J. Cardoso, \"Unsupervised brain imaging 3d anomaly detection and segmentation with transformers,\" Med. Image Anal., vol. 79, p. 102475, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.515, + 0.852, + 0.92, + 0.886 + ], + "angle": 0, + "content": "[69] L. Chen, Z. You, N. Zhang, J. Xi, and X. Le, “Utrad: Anomaly detection and localization with u-transformer,” Neural Networks, vol. 147, pp. 53–62, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.515, + 0.886, + 0.92, + 0.921 + ], + "angle": 0, + "content": "[70] N. Shvetsova, B. Bakker, I. Fedulova, H. Schulz, and D. V. Dylov, \"Anomaly detection in medical imaging with deep perceptual autoencoders,\" IEEE Access, vol. 9, pp. 118571-118583, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.515, + 0.921, + 0.92, + 0.945 + ], + "angle": 0, + "content": "[71] R. L. Draelos, D. Dov, M. A. Mazurowski, J. Y. Lo, R. Henao, G. D. Rubin, and L. Carin, \"Machine-learning-based multiple abnormality" + }, + { + "type": "list", + "bbox": [ + 0.515, + 0.071, + 0.92, + 0.945 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.078, + 0.03, + 0.427, + 0.042 + ], + "angle": 0, + "content": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. 
X, XXX 2025" + }, + { + "type": "page_number", + "bbox": [ + 0.906, + 0.031, + 0.921, + 0.041 + ], + "angle": 0, + "content": "22" + }, + { + "type": "ref_text", + "bbox": [ + 0.112, + 0.072, + 0.49, + 0.095 + ], + "angle": 0, + "content": "prediction with large-scale chest computed tomography volumes,\" Med. Image Anal., vol. 67, p. 101857, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.096, + 0.492, + 0.13 + ], + "angle": 0, + "content": "[72] N. Shvetsova, B. Bakker, I. Fedulova, H. Schulz, and D. V. Dylov, \"Anomaly detection in medical imaging with deep perceptual autoencoders,\" IEEE Access, vol. 9, pp. 118571-118583, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.085, + 0.131, + 0.492, + 0.176 + ], + "angle": 0, + "content": "[73] H. Zhao, Y. Li, N. He, K. Ma, L. Fang, H. Li, and Y. Zheng, \"Anomaly detection for medical images using self-supervised and translation-consistent features,\" IEEE Trans. Med. Imaging, vol. 40, no. 12, pp. 3641-3651, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.085, + 0.177, + 0.492, + 0.212 + ], + "angle": 0, + "content": "[74] R. Nayak, U. C. Pati, and S. K. Das, “A comprehensive review on deep learning-based methods for video anomaly detection,” Image Vis. Comput., vol. 106, p. 104078, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.085, + 0.213, + 0.492, + 0.247 + ], + "angle": 0, + "content": "[75] Y. Wang, T. Liu, J. Zhou, and J. Guan, \"Video anomaly detection based on spatio-temporal relationships among objects,\" Neurocomputing, vol. 532, pp. 141-151, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.248, + 0.492, + 0.281 + ], + "angle": 0, + "content": "[76] N. Li, F. Chang, and C. Liu, \"Spatial-temporal cascade autoencoder for video anomaly detection in crowded scenes,\" IEEE Trans. Multimed., vol. 23, pp. 203-215, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.282, + 0.492, + 0.317 + ], + "angle": 0, + "content": "[77] D. Chen, P. Wang, L. Yue, Y. Zhang, and T. Jia, “Anomaly detection in surveillance video based on bidirectional prediction,” Image Vis. Comput., vol. 98, p. 103915, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.318, + 0.492, + 0.352 + ], + "angle": 0, + "content": "[78] M. H. Bhuyan, D. K. Bhattacharyya, and J. K. Kalita, “Network anomaly detection: methods, systems and tools,” IEEE Commun. Surv. Tutor., vol. 16, no. 1, pp. 303-336, 2013." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.353, + 0.492, + 0.397 + ], + "angle": 0, + "content": "[79] S. Liu, B. Zhou, Q. Ding, B. Hooi, Z. Zhang, H. Shen, and X. Cheng, \"Time series anomaly detection with adversarial reconstruction networks,\" IEEE Trans. Knowl. Data Eng., vol. 35, no. 4, pp. 4293-4306, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.398, + 0.492, + 0.433 + ], + "angle": 0, + "content": "[80] H. Cao, C. Tan, Z. Gao, Y. Xu, G. Chen, P-A. Heng, and S. Z. Li, “A survey on generative diffusion models,” IEEE Transactions on Knowledge and Data Engineering, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.434, + 0.492, + 0.468 + ], + "angle": 0, + "content": "[81] I. Goodfellow, J. Pouget-Abadie, M. Mirza, B. Xu, D. Warde-Farley, S. Ozair, A. Courville, and Y. Bengio, \"Generative adversarial networks,\" Commun. ACM, vol. 63, no. 11, pp. 139–144, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.469, + 0.492, + 0.514 + ], + "angle": 0, + "content": "[82] L. Yang, Z. Zhang, Y. Song, S. Hong, R. Xu, Y. Zhao, W. Zhang, B. Cui, and M.-H. 
Yang, \"Diffusion models: A comprehensive survey of methods and applications,\" ACM Comput. Surv., vol. 56, no. 4, pp. 1-39, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.515, + 0.492, + 0.56 + ], + "angle": 0, + "content": "[83] S. Bond-Taylor, A. Leach, Y. Long, and C. G. Willcocks, “Deep generative modelling: A comparative review of vaes, gans, normalizing flows, energy-based and autoregressive models,” IEEE Trans. Pattern Anal. Mach. Intell., vol. 44, no. 11, pp. 7327-7347, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.561, + 0.492, + 0.595 + ], + "angle": 0, + "content": "[84] S. Sheynin, S. Benaim, and L. Wolf, “A hierarchical transformation-discriminating generative model for few shot anomaly detection,” in Proc. IEEE/CVF Int. Conf. Comput. Vis., 2021, pp. 8495-8504." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.596, + 0.492, + 0.63 + ], + "angle": 0, + "content": "[85] W. Lim, K. Y. S. Chek, L. B. Theng, and C. T. C. Lin, “Future of generative adversarial networks (gan) for anomaly detection in network security: A review,” Comput. Secur., p. 103733, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.631, + 0.492, + 0.665 + ], + "angle": 0, + "content": "[86] X. Du, J. Chen, J. Yu, S. Li, and Q. Tan, \"Generative adversarial nets for unsupervised outlier detection,\" Expert Syst. Appl., vol. 236, p. 121161, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.665, + 0.492, + 0.7 + ], + "angle": 0, + "content": "[87] J. Wu, Z. Zhao, C. Sun, R. Yan, and X. Chen, “Fault-attention generative probabilistic adversarial autoencoder for machine anomaly detection,” IEEE Trans. Ind. Inf., vol. 16, no. 12, pp. 7479–7488, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.7, + 0.492, + 0.735 + ], + "angle": 0, + "content": "[88] F. Dong, Y. Zhang, and X. Nie, \"Dual discriminator generative adversarial network for video anomaly detection,\" IEEE Access, vol. 8, pp. 88170-88176, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.736, + 0.492, + 0.77 + ], + "angle": 0, + "content": "[89] D. Guo, Z. Liu, and R. Li, \"Regraphgan: A graph generative adversarial network model for dynamic network anomaly detection,\" Neural Networks, vol. 166, pp. 273-285, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.771, + 0.492, + 0.805 + ], + "angle": 0, + "content": "[90] Y. Liu, Z. Li, C. Zhou, Y. Jiang, J. Sun, M. Wang, and X. He, \"Generative adversarial active learning for unsupervised outlier detection,\" IEEE Trans. Knowl. Data Eng., vol. 32, no. 8, pp. 1517-1528, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.806, + 0.492, + 0.862 + ], + "angle": 0, + "content": "[91] C. Liu, Z. Kong, S. Babu, C. Joslin, and J. Ferguson, \"An integrated manifold learning approach for high-dimensional data feature extractions and its applications to online process monitoring of additive manufacturing,\" IISE Transactions, vol. 53, no. 11, pp. 1215-1230, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.863, + 0.492, + 0.908 + ], + "angle": 0, + "content": "[92] J. Miao, H. Tao, H. Xie, J. Sun, and J. Cao, \"Reconstruction-based anomaly detection for multivariate time series using contrastive generative adversarial networks,\" Inf. Process. Manag., vol. 61, no. 1, p. 103569, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.909, + 0.492, + 0.944 + ], + "angle": 0, + "content": "[93] R. Liu, W. Liu, Z. Zheng, L. Wang, L. Mao, Q. Qiu, and G. 
Ling, \"Anomaly-gan: A data augmentation method for train surface anomaly detection,\" Expert Syst. Appl., vol. 228, p. 120284, 2023." + }, + { + "type": "list", + "bbox": [ + 0.084, + 0.072, + 0.492, + 0.944 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.515, + 0.071, + 0.921, + 0.117 + ], + "angle": 0, + "content": "[94] Y. Li, Z. Shi, C. Liu, W. Tian, Z. Kong, and C. B. Williams, \"Augmented time regularized generative adversarial network (atr-gan) for data augmentation in online process anomaly detection,\" IEEE Trans. Autom. Sci. Eng., vol. 19, no. 4, pp. 3338-3355, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.118, + 0.921, + 0.152 + ], + "angle": 0, + "content": "[95] L. Zhang, W. Bai, X. Xie, L. Chen, and P. Dong, “Tmanomaly: Time-series mutual adversarial networks for industrial anomaly detection,” IEEE Trans. Ind. Inform., 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.153, + 0.921, + 0.197 + ], + "angle": 0, + "content": "[96] B. Du, X. Sun, J. Ye, K. Cheng, J. Wang, and L. Sun, \"Gan-based anomaly detection for multivariate time series using polluted training set,\" IEEE Trans. Knowl. Data Eng., vol. 35, no. 12, pp. 12 208-12 219, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.198, + 0.921, + 0.233 + ], + "angle": 0, + "content": "[97] G. Fan, Y. Ma, X. Mei, F. Fan, J. Huang, and J. Ma, “Hyperspectral anomaly detection with robust graph autoencoders,” IEEE Trans. Geosci. Remote Sens., vol. 60, pp. 1-14, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.234, + 0.921, + 0.277 + ], + "angle": 0, + "content": "[98] S. Wang, X. Wang, L. Zhang, and Y. Zhong, \"Auto-ad: Autonomous hyperspectral anomaly detection network based on fully convolutional autoencoder,\" IEEE Trans. Geosci. Remote Sens., vol. 60, pp. 1-14, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.278, + 0.921, + 0.313 + ], + "angle": 0, + "content": "[99] H. Liu, X. Su, X. Shen, and X. Zhou, \"Msnet: Self-supervised multiscale network with enhanced separation training for hyperspectral anomaly detection,\" IEEE Trans. Geosci. Remote Sens., 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.314, + 0.921, + 0.347 + ], + "angle": 0, + "content": "[100] X. Lin, Z. Li, H. Fan, Y. Fu, and X. Chen, “Exploiting negative correlation for unsupervised anomaly detection in contaminated time series,” Expert Syst. Appl., p. 123535, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.348, + 0.921, + 0.392 + ], + "angle": 0, + "content": "[101] C. Huang, Z. Yang, J. Wen, Y. Xu, Q. Jiang, J. Yang, and Y. Wang, \"Self-supervision-augmented deep autoencoder for unsupervised visual anomaly detection,\" IEEE Trans. Cybern., vol. 52, no. 12, pp. 13834-13847, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.393, + 0.921, + 0.428 + ], + "angle": 0, + "content": "[102] C. Yin, S. Zhang, J. Wang, and N. N. Xiong, \"Anomaly detection based on convolutional recurrent autoencoder for IoT time series,\" IEEE Trans. Syst. Man Cybern.: Syst., vol. 52, no. 1, pp. 112-122, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.429, + 0.921, + 0.462 + ], + "angle": 0, + "content": "[103] W. Zhang, C. Zhang, and F. Tsung, “Grelen: Multivariate time series anomaly detection from the perspective of graph relational learning,” in IJCAI, 2022, pp. 2390–2397." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.463, + 0.921, + 0.496 + ], + "angle": 0, + "content": "[104] X. Zhou, Y. Hu, W. Liang, J. 
Ma, and Q. Jin, \"Variational lstm enhanced anomaly detection for industrial big data,\" IEEE Trans. Ind. Inform., vol. 17, no. 5, pp. 3469-3477, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.496, + 0.921, + 0.519 + ], + "angle": 0, + "content": "[105] A. Makhzani, J. Shlens, N. Jaitly, I. Goodfellow, and B. Frey, \"Adversarial autoencoders,\" arXiv preprint arXiv:1511.05644, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.52, + 0.921, + 0.553 + ], + "angle": 0, + "content": "[106] Q. Su, B. Tian, H. Wan, and J. Yin, \"Anomaly detection under contaminated data with contamination-immune bidirectional gans,\" IEEE Trans. Knowl. Data Eng., 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.554, + 0.921, + 0.588 + ], + "angle": 0, + "content": "[107] J. Yu, X. Gao, F. Zhai, B. Li, B. Xue, S. Fu, L. Chen, and Z. Meng, \"An adversarial contrastive autoencoder for robust multivariate time series anomaly detection,\" Expert Syst. Appl., vol. 245, p. 123010, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.589, + 0.921, + 0.612 + ], + "angle": 0, + "content": "[108] J. Ho, A. Jain, and P. Abbeel, “Denoising diffusion probabilistic models,” Adv. Neural Inf. Process. Syst., vol. 33, pp. 6840–6851, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.613, + 0.921, + 0.646 + ], + "angle": 0, + "content": "[109] J. Wolleb, F. Bieder, R. Sandkühler, and P. C. Cattin, \"Diffusion models for medical anomaly detection,\" in Int. Conf. Med. Image Comput. Comput.-Assist. Interv. (MICCAI). Springer, 2022, pp. 35-45." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.647, + 0.921, + 0.68 + ], + "angle": 0, + "content": "[110] X. Zhang, N. Li, J. Li, T. Dai, Y. Jiang, and S.-T. Xia, \"Unsupervised surface anomaly detection with diffusion probabilistic model,\" in Proc. IEEE/CVF Int. Conf. Comput. Vis., 2023, pp. 6782-6791." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.681, + 0.921, + 0.714 + ], + "angle": 0, + "content": "[111] S. Li, J. Yu, Y. Lu, G. Yang, X. Du, and S. Liu, \"Self-supervised enhanced denoising diffusion for anomaly detection,\" Inf. Sci., vol. 669, p. 120612, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.715, + 0.921, + 0.749 + ], + "angle": 0, + "content": "[112] J. Zeng, X. Liu, and Z. Li, \"Radio anomaly detection based on improved denoising diffusion probabilistic models,\" IEEE Commun. Lett., 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.749, + 0.921, + 0.784 + ], + "angle": 0, + "content": "[113] X. Li, C. Xiao, Z. Feng, S. Pang, W. Tai, and F. Zhou, \"Controlled graph neural networks with denoising diffusion for anomaly detection,\" Expert Syst. Appl., vol. 237, p. 121533, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.785, + 0.921, + 0.818 + ], + "angle": 0, + "content": "[114] C. Li, G. Feng, Y. Li, R. Liu, Q. Miao, and L. Chang, “Diffstad: Denoising diffusion probabilistic models for vehicle trajectory anomaly detection,” Knowledge-Based Systems, vol. 286, p. 111387, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.819, + 0.921, + 0.863 + ], + "angle": 0, + "content": "[115] J. Pei, J. Wang, D. Shi, and P. Wang, \"Detection and imputation-based two-stage denoising diffusion power system measurement recovery under cyber-physical uncertainties,\" IEEE Trans. Smart Grid, vol. 15, no. 6, pp. 5965-5980, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.864, + 0.921, + 0.908 + ], + "angle": 0, + "content": "[116] H. He, J. 
Zhang, H. Chen, X. Chen, Z. Li, X. Chen, Y. Wang, C. Wang, and L. Xie, \"A diffusion-based framework for multi-class anomaly detection,\" in Proc. AAAI Conf. Artif. Intell., vol. 38, no. 8, 2024, pp. 8472-8480." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.909, + 0.921, + 0.944 + ], + "angle": 0, + "content": "[117] A. Sherstinsky, “Fundamentals of recurrent neural network (rnn) and long short-term memory (lstm) network,” Physica D: Nonlinear Phenomena, vol. 404, p. 132306, 2020." + }, + { + "type": "list", + "bbox": [ + 0.508, + 0.071, + 0.921, + 0.944 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.078, + 0.03, + 0.427, + 0.042 + ], + "angle": 0, + "content": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. X, XXX 2025" + }, + { + "type": "page_number", + "bbox": [ + 0.906, + 0.031, + 0.921, + 0.041 + ], + "angle": 0, + "content": "23" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.071, + 0.492, + 0.106 + ], + "angle": 0, + "content": "[118] G. Van Houdt, C. Mosquera, and G. Nápoles, “A review on the long short-term memory model,” Artif. Intell. Rev., vol. 53, no. 8, pp. 5929–5955, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.107, + 0.492, + 0.141 + ], + "angle": 0, + "content": "[119] B. Lindemann, B. Maschler, N. Sahlab, and M. Weyrich, “A survey on anomaly detection for technical systems using lstm networks,” Comput. Ind., vol. 131, p. 103498, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.142, + 0.492, + 0.175 + ], + "angle": 0, + "content": "[120] R. Dey and F. M. Salem, “Gate-variants of gated recurrent unit (gru) neural networks,” in Proc. 2017 IEEE 60th Int. Midwest Symp. Circuits Syst. (MWSCAS). IEEE, 2017, pp. 1597–1600." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.176, + 0.492, + 0.209 + ], + "angle": 0, + "content": "[121] Y. Wang, X. Du, Z. Lu, Q. Duan, and J. Wu, \"Improved lstm-based time-series anomaly detection in rail transit operation environments,\" IEEE Trans. Ind. Inform., vol. 18, no. 12, pp. 9027-9036, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.21, + 0.492, + 0.244 + ], + "angle": 0, + "content": "[122] H. Chen, H. Liu, X. Chu, Q. Liu, and D. Xue, \"Anomaly detection and critical scada parameters identification for wind turbines based on lstm-ae neural network,\" Renew. Energy, vol. 172, pp. 829-840, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.245, + 0.492, + 0.279 + ], + "angle": 0, + "content": "[123] P. Liu, X. Sun, Y. Han, Z. He, W. Zhang, and C. Wu, \"Arrhythmia classification of lstm autoencoder based on time series anomaly detection,\" Biomed. Signal Process. Control, vol. 71, p. 103228, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.279, + 0.492, + 0.313 + ], + "angle": 0, + "content": "[124] Y. Yao, J. Ma, S. Feng, and Y. Ye, \"Svd-ae: An asymmetric autoencoder with svd regularization for multivariate time series anomaly detection,\" Neural Networks, vol. 170, pp. 535-547, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.314, + 0.492, + 0.358 + ], + "angle": 0, + "content": "[125] S. Longari, D. H. N. Valcarcel, M. Zago, M. Carminati, and S. Zanero, \"Cannolo: An anomaly detection system based on lstm autoencoders for controller area network,\" IEEE Trans. Netw. Serv. Manag., vol. 18, no. 2, pp. 1913-1924, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.359, + 0.492, + 0.404 + ], + "angle": 0, + "content": "[126] J. Pei, J. Wang, and D. 
Shi, \"Data-driven measurement tampering detection considering spatial-temporal correlations,\" in Proc. 2019 IEEE 3rd Conf. Energy Internet Energy Syst. Integr. (EI2), 2019, pp. 2641-2646." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.405, + 0.492, + 0.439 + ], + "angle": 0, + "content": "[127] T. Lei, C. Gong, G. Chen, M. Ou, K. Yang, and J. Li, “A novel unsupervised framework for time series data anomaly detection via spectrum decomposition,” Knowledge-Based Systems, vol. 280, p. 111002, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.439, + 0.492, + 0.473 + ], + "angle": 0, + "content": "[128] D. Hu, S. Wu, J. Wang, and D. Shi, \"Training a dynamic neural network to detect false data injection attacks under multiple unforeseen operating conditions,\" IEEE Trans. Smart Grid, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.474, + 0.492, + 0.508 + ], + "angle": 0, + "content": "[129] C. Tang, L. Xu, B. Yang, Y. Tang, and D. Zhao, “Gru-based interpretable multivariate time series anomaly detection in industrial control system,” Comput. Secur., vol. 127, p. 103094, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.508, + 0.492, + 0.553 + ], + "angle": 0, + "content": "[130] J. Yu, X. Gao, B. Li, F. Zhai, J. Lu, B. Xue, S. Fu, and C. Xiao, \"A filter-augmented auto-encoder with learnable normalization for robust multivariate time series anomaly detection,\" Neural Networks, vol. 170, pp. 478-493, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.554, + 0.492, + 0.576 + ], + "angle": 0, + "content": "[131] A. Vaswani, \"Attention is all you need,\" Adv. Neural Inf. Process. Syst., 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.577, + 0.492, + 0.611 + ], + "angle": 0, + "content": "[132] H. Kang and P. Kang, \"Transformer-based multivariate time series anomaly detection using inter-variable attention mechanism,\" Knowledge-Based Systems, p. 111507, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.612, + 0.492, + 0.646 + ], + "angle": 0, + "content": "[133] J. Kim, H. Kang, and P. Kang, “Time-series anomaly detection with stacked transformer representations and 1d convolutional network,” Eng. Appl. Artif. Intell., vol. 120, p. 105964, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.646, + 0.492, + 0.68 + ], + "angle": 0, + "content": "[134] S. Tuli, G. Casale, and N. R. Jennings, “Tranad: Deep transformer networks for anomaly detection in multivariate time series data,” arXiv preprint arXiv:2201.07284, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.68, + 0.492, + 0.714 + ], + "angle": 0, + "content": "[135] C. Wang and G. Liu, “From anomaly detection to classification with graph attention and transformer for multivariate time series,” Adv. Eng. Inform., vol. 60, p. 102357, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.715, + 0.492, + 0.749 + ], + "angle": 0, + "content": "[136] J. Fan, Z. Wang, H. Wu, D. Sun, J. Wu, and X. Lu, \"An adversarial time-frequency reconstruction network for unsupervised anomaly detection,\" Neural Networks, vol. 168, pp. 44-56, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.749, + 0.492, + 0.794 + ], + "angle": 0, + "content": "[137] Y. Shi, B. Wang, Y. Yu, X. Tang, C. Huang, and J. Dong, \"Robust anomaly detection for multivariate time series through temporal GCNs and attention-based vae,\" Knowledge-Based Systems, vol. 275, p. 110725, 2023." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.795, + 0.492, + 0.83 + ], + "angle": 0, + "content": "[138] C. Ding, S. Sun, and J. Zhao, \"Mst-gat: A multimodal spatial-temporal graph attention network for time series anomaly detection,\" Inf. Fusion, vol. 89, pp. 527-536, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.83, + 0.492, + 0.864 + ], + "angle": 0, + "content": "[139] W. Zhu, W. Li, E. R. Dorsey, and J. Luo, \"Unsupervised anomaly detection by densely contrastive learning for time series data,\" Neural Networks, vol. 168, pp. 450-458, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.864, + 0.492, + 0.898 + ], + "angle": 0, + "content": "[140] H. Sun, M. Chen, J. Weng, Z. Liu, and G. Geng, \"Anomaly detection for in-vehicle network using cnn-lstm with attention mechanism,\" IEEE Trans. Veh. Technol., vol. 70, no. 10, pp. 10880-10893, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.899, + 0.492, + 0.944 + ], + "angle": 0, + "content": "[141] T. Le, H. C. Vu, A. Ponchet-Durupt, N. Boudaoud, Z. Cherfi-Boulanger, and T. Nguyen-Trang, \"Unsupervised detecting anomalies in multivariate time series by robust convolutional LSTM encoder-decoder (rcled),\" Neurocomputing, vol. 592, p. 127791, 2024." + }, + { + "type": "list", + "bbox": [ + 0.077, + 0.071, + 0.492, + 0.944 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.071, + 0.921, + 0.117 + ], + "angle": 0, + "content": "[142] M. Jin, H. Y. Koh, Q. Wen, D. Zambon, C. Alippi, G. I. Webb, I. King, and S. Pan, “A survey on graph neural networks for time series: Forecasting, classification, imputation, and anomaly detection,” IEEE Trans. Pattern Anal. Mach. Intell., 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.118, + 0.921, + 0.152 + ], + "angle": 0, + "content": "[143] Y. Wu, H.-N. Dai, and H. Tang, \"Graph neural networks for anomaly detection in industrial internet of things,\" IEEE Internet Things J., vol. 9, no. 12, pp. 9214-9231, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.153, + 0.921, + 0.187 + ], + "angle": 0, + "content": "[144] H. Kim, B. S. Lee, W.-Y. Shin, and S. Lim, “Graph anomaly detection with graph neural networks: Current status and challenges,” IEEE Access, vol. 10, pp. 111820-111829, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.187, + 0.921, + 0.221 + ], + "angle": 0, + "content": "[145] A. Deng and B. Hooi, “Graph neural network-based anomaly detection in multivariate time series,” in Proc. AAAI Conf. Artif. Intell. (AAAI), vol. 35, no. 5, 2021, pp. 4027–4035." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.222, + 0.921, + 0.266 + ], + "angle": 0, + "content": "[146] Z. Chen, D. Chen, X. Zhang, Z. Yuan, and X. Cheng, “Learning graph structures with transformer for multivariate time-series anomaly detection in IoT,” IEEE Internet Things J., vol. 9, no. 12, pp. 9179–9189, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.267, + 0.921, + 0.313 + ], + "angle": 0, + "content": "[147] Y. Zheng, H. Y. Koh, M. Jin, L. Chi, K. T. Phan, S. Pan, Y.-P. P. Chen, and W. Xiang, \"Correlation-aware spatial-temporal graph learning for multivariate time-series anomaly detection,\" IEEE Trans. Neural Netw. Learn. Syst., 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.314, + 0.921, + 0.357 + ], + "angle": 0, + "content": "[148] Y. Liu, Z. Li, S. Pan, C. Gong, C. Zhou, and G. 
Karypis, \"Anomaly detection on attributed networks via contrastive self-supervised learning,\" IEEE Trans. Neural Netw. Learn. Syst., vol. 33, no. 6, pp. 2378-2392, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.359, + 0.921, + 0.404 + ], + "angle": 0, + "content": "[149] H. Zhao, Y. Wang, J. Duan, C. Huang, D. Cao, Y. Tong, B. Xu, J. Bai, J. Tong, and Q. Zhang, \"Multivariate time-series anomaly detection via graph attention network,\" in Proc. 2020 IEEE Int. Conf. Data Min. (ICDM)). IEEE, 2020, pp. 841-850." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.405, + 0.921, + 0.45 + ], + "angle": 0, + "content": "[150] W. Chen, L. Tian, B. Chen, L. Dai, Z. Duan, and M. Zhou, “Deep variational graph convolutional recurrent network for multivariate time series anomaly detection,” in Int. Conf. Mach. Learn. (ICML). PMLR, 2022, pp. 3621–3633." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.451, + 0.921, + 0.495 + ], + "angle": 0, + "content": "[151] S. Han and S. S. Woo, \"Learning sparse latent graph representations for anomaly detection in multivariate time series,\" in Proc. 28th ACM SIGKDD Conf. Knowl. Discov. Data Min. (KDD), 2022, pp. 2977-2986." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.496, + 0.921, + 0.531 + ], + "angle": 0, + "content": "[152] Y. Tang, L. Zhao, S. Zhang, C. Gong, G. Li, and J. Yang, \"Integrating prediction and reconstruction for anomaly detection,\" Pattern Recognit. Lett., vol. 129, pp. 123-130, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.531, + 0.921, + 0.576 + ], + "angle": 0, + "content": "[153] M. Zheng, J. Man, D. Wang, Y. Chen, Q. Li, and Y. Liu, \"Semisupervised multivariate time series anomaly detection for wind turbines using generator scada data,\" Reliab. Eng. Syst. Saf., vol. 235, p. 109235, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.577, + 0.921, + 0.611 + ], + "angle": 0, + "content": "[154] Y. Wei, J. Jang-Jaccard, W. Xu, F. Sabrina, S. Camtepe, and M. Boulic, \"Lstm-autoencoder-based anomaly detection for indoor air quality time-series data,\" IEEE Sens. J., vol. 23, no. 4, pp. 3787-3800, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.611, + 0.921, + 0.646 + ], + "angle": 0, + "content": "[155] G. Pu, L. Wang, J. Shen, and F. Dong, “A hybrid unsupervised clustering-based anomaly detection method,” Tsinghua Sci. Technol., vol. 26, no. 2, pp. 146–153, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.646, + 0.921, + 0.679 + ], + "angle": 0, + "content": "[156] B. Liu, Y. Xiao, L. Cao, Z. Hao, and F. Deng, \"Svdd-based outlier detection on uncertain data,\" Knowl. Inf. Syst., vol. 34, pp. 597-618, 2013." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.68, + 0.921, + 0.714 + ], + "angle": 0, + "content": "[157] A. P. Muniyandi, R. Rajeswari, and R. Rajaram, \"Network anomaly detection by cascading k-means clustering and c4. 5 decision tree algorithm,\" *Proceedia Eng.*, vol. 30, pp. 174-182, 2012." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.714, + 0.921, + 0.759 + ], + "angle": 0, + "content": "[158] A. M. Ikotun, A. E. Ezugwu, L. Abualigah, B. Abuhaija, and J. Heming, \"K-means clustering algorithms: A comprehensive review, variants analysis, and advances in the era of big data,\" Inf. Sci., vol. 622, pp. 178-210, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.76, + 0.921, + 0.794 + ], + "angle": 0, + "content": "[159] H. V. Singh, A. Girdhar, and S. 
Dahiya, “A literature survey based on dbscan algorithms,” in Proc. 2022 6th Int. Conf. Intell. Comput. Control Syst. (ICICCS). IEEE, 2022, pp. 751-758." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.795, + 0.921, + 0.829 + ], + "angle": 0, + "content": "[160] F. Murtagh and P. Contreras, “Algorithms for hierarchical clustering: an overview,” Wiley Interdiscip. Rev. Data Min. Knowl. Discov., vol. 2, no. 1, pp. 86–97, 2012." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.829, + 0.921, + 0.864 + ], + "angle": 0, + "content": "[161] J. Li, H. Izakian, W. Pedrycz, and I. Jamal, \"Clustering-based anomaly detection in multivariate time series data,\" Appl. Soft Comput., vol. 100, p. 106919, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.864, + 0.921, + 0.908 + ], + "angle": 0, + "content": "[162] A. Markovitz, G. Sharir, I. Friedman, L. Zelnik-Manor, and S. Avidan, \"Graph embedded pose clustering for anomaly detection,\" in Proc. IEEE/CVF Conf. Comput. Vis. Pattern Recognit., 2020, pp. 10539-10547." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.909, + 0.921, + 0.944 + ], + "angle": 0, + "content": "[163] S. Qiu, J. Ye, J. Zhao, L. He, L. Liu, E. Bicong, and X. Huang, “Video anomaly detection guided by clustering learning,” Pattern Recognit., vol. 153, p. 110550, 2024." + }, + { + "type": "list", + "bbox": [ + 0.508, + 0.071, + 0.921, + 0.944 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.078, + 0.03, + 0.427, + 0.042 + ], + "angle": 0, + "content": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. X, XXX 2025" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.032, + 0.921, + 0.041 + ], + "angle": 0, + "content": "24" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.072, + 0.492, + 0.106 + ], + "angle": 0, + "content": "[164] I. Kobyzev, S. J. Prince, and M. A. Brubaker, “Normalizing flows: An introduction and review of current methods,” IEEE Trans. Pattern Anal. Mach. Intell., vol. 43, no. 11, pp. 3964–3979, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.107, + 0.492, + 0.14 + ], + "angle": 0, + "content": "[165] J. Yu, Y. Zheng, X. Wang, W. Li, Y. Wu, R. Zhao, and L. Wu, \"Fastflow: Unsupervised anomaly detection and localization via 2d normalizing flows,\" arXiv preprint arXiv:2111.07677, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.141, + 0.492, + 0.174 + ], + "angle": 0, + "content": "[166] M. Cho, T. Kim, W. J. Kim, S. Cho, and S. Lee, \"Unsupervised video anomaly detection via normalizing flows with implicit latent features,\" Pattern Recognit., vol. 129, p. 108703, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.175, + 0.492, + 0.197 + ], + "angle": 0, + "content": "[167] Q. Zhou, S. He, H. Liu, J. Chen, and W. Meng, \"Label-free multivariate time series anomaly detection,\" IEEE Trans. Knowl. Data Eng., 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.198, + 0.492, + 0.23 + ], + "angle": 0, + "content": "[168] E. Dai and J. Chen, \"Graph-augmented normalizing flows for anomaly detection of multiple time series,\" arXiv preprint arXiv:2202.07857, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.231, + 0.492, + 0.263 + ], + "angle": 0, + "content": "[169] Y. Zhou, X. Liang, W. Zhang, L. Zhang, and X. Song, \"Vae-based deep svdd for anomaly detection,\" Neurocomputing, vol. 453, pp. 131-140, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.264, + 0.492, + 0.299 + ], + "angle": 0, + "content": "[170] Z. 
Zhang and X. Deng, \"Anomaly detection using improved deep svdd model with data structure preservation,\" Pattern Recognit. Lett., vol. 148, pp. 1-6, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.3, + 0.492, + 0.332 + ], + "angle": 0, + "content": "[171] J. Luo, J. Lin, Z. Yang, and H. Liu, \"Smd anomaly detection: A self-supervised texture-structure anomaly detection framework,\" IEEE Trans. Instrum. Meas., vol. 71, pp. 1-11, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.333, + 0.492, + 0.367 + ], + "angle": 0, + "content": "[172] C.-L. Li, K. Sohn, J. Yoon, and T. Pfister, \"Cutpaste: Self-supervised learning for anomaly detection and localization,\" in Proc. IEEE/CVF Conf. Comput. Vis. Pattern Recognit., 2021, pp. 9664-9674." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.368, + 0.492, + 0.401 + ], + "angle": 0, + "content": "[173] R. Liu, W. Liu, Z. Zheng, L. Wang, L. Mao, Q. Qiu, and G. Ling, \"Anomaly-gan: A data augmentation method for train surface anomaly detection,\" Expert Syst. Appl., vol. 228, p. 120284, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.401, + 0.492, + 0.434 + ], + "angle": 0, + "content": "[174] Q. Wen, L. Sun, F. Yang, X. Song, J. Gao, X. Wang, and H. Xu, \"Time series data augmentation for deep learning: A survey,\" arXiv preprint arXiv:2002.12478, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.435, + 0.492, + 0.469 + ], + "angle": 0, + "content": "[175] H. Hojjati, T. K. K. Ho, and N. Armanfard, \"Self-supervised anomaly detection in computer vision and beyond: A survey and outlook,\" Neural Networks, vol. 172, p. 106106, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.47, + 0.492, + 0.513 + ], + "angle": 0, + "content": "[176] X. Zhang, M. Xu, and X. Zhou, “Realnet: A feature selection network with realistic synthetic anomaly for anomaly detection,” in Proc. IEEE/CVF Conf. Comput. Vis. Pattern Recognit., 2024, pp. 16699–16708." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.514, + 0.492, + 0.548 + ], + "angle": 0, + "content": "[177] F. Van Wyk, Y. Wang, A. Khojandi, and N. Masoud, “Real-time sensor anomaly detection and identification in automated vehicles,” IEEE Trans. Intell. Transp. Syst., vol. 21, no. 3, pp. 1264–1276, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.549, + 0.492, + 0.592 + ], + "angle": 0, + "content": "[178] M. Abouof, R. Mizouni, S. Singh, H. Otrok, and E. Damiani, \"Self-supervised online and lightweight anomaly and event detection for IoT devices,\" IEEE Internet Things J, vol. 9, no. 24, pp. 25 285-25 299, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.593, + 0.492, + 0.638 + ], + "angle": 0, + "content": "[179] X. Zhou, J. Wu, W. Liang, I. Kevin, K. Wang, Z. Yan, L. T. Yang, and Q. Jin, \"Reconstructed graph neural network with knowledge distillation for lightweight anomaly detection,\" IEEE Trans. Neural Netw. Learn. Syst., 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.639, + 0.492, + 0.672 + ], + "angle": 0, + "content": "[180] Y. Zhao, G. H. Chen, and Z. Jia, “Tod: GPU-accelerated outlier detection via tensor operations,” arXiv preprint arXiv:2110.14007, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.673, + 0.492, + 0.706 + ], + "angle": 0, + "content": "[181] A. Al-Mazrawe and B. Al-Musawi, “Anomaly detection in cloud network: A review,” in BIO Web of Conferences, vol. 97. EDP Sciences, 2024, p. 00019." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.707, + 0.492, + 0.73 + ], + "angle": 0, + "content": "[182] Z. Niu, G. Zhong, and H. Yu, “A review on the attention mechanism of deep learning,” Neurocomputing, vol. 452, pp. 48–62, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.73, + 0.492, + 0.774 + ], + "angle": 0, + "content": "[183] H. Liu, X. Huang, M. Jia, T. Jia, J. Han, Y. Li, and Z. Wu, \"Uac-ad: Unsupervised adversarial contrastive learning for anomaly detection on multi-modal data in microservice systems,\" IEEE Trans. Serv. Comput., 2024." + }, + { + "type": "list", + "bbox": [ + 0.077, + 0.072, + 0.492, + 0.774 + ], + "angle": 0, + "content": null + } + ] +] \ No newline at end of file diff --git a/data/2025/2503_13xxx/2503.13195/86c7a688-40c1-4477-ae2d-59118e3bfc36_origin.pdf b/data/2025/2503_13xxx/2503.13195/86c7a688-40c1-4477-ae2d-59118e3bfc36_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..e477d25ed0a44c4afc0626ad36c51793c4d9cf90 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13195/86c7a688-40c1-4477-ae2d-59118e3bfc36_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8542de88759286f2470808ae619300ab415d5ffe73c6ba8a6e5328098086dc54 +size 5339182 diff --git a/data/2025/2503_13xxx/2503.13195/full.md b/data/2025/2503_13xxx/2503.13195/full.md new file mode 100644 index 0000000000000000000000000000000000000000..3695d02b241ba335313292696c815c191c424641 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13195/full.md @@ -0,0 +1,658 @@ +# Deep Learning Advancements in Anomaly Detection: A Comprehensive Survey + +Haoqi Huang, Ping Wang $\text{©}$ , Fellow, IEEE, Jianhua Pei $\text{©}$ , Graduate Student Member, IEEE, Jiacheng Wang $\text{©}$ , Shahren Alexanian, and Dusit Niyato $\text{©}$ , Fellow, IEEE + +Abstract—The rapid expansion of data from diverse sources has made anomaly detection (AD) increasingly essential for identifying unexpected observations that may signal system failures, security breaches, or fraud. As datasets become more complex and high-dimensional, traditional detection methods struggle to effectively capture intricate patterns. Advances in deep learning have made AD methods more powerful and adaptable, improving their ability to handle high-dimensional and unstructured data. This survey provides a comprehensive review of over 180 recent studies, focusing on deep learning-based AD techniques. We categorize and analyze these methods into reconstruction-based and prediction-based approaches, highlighting their effectiveness in modeling complex data distributions. Additionally, we explore the integration of traditional and deep learning methods, highlighting how hybrid approaches combine the interpretability of traditional techniques with the flexibility of deep learning to enhance detection accuracy and model transparency. Finally, we identify open issues and propose future research directions to advance the field of AD. This review bridges gaps in existing literature and serves as a valuable resource for researchers and practitioners seeking to enhance AD techniques using deep learning. + +Index Terms—Anomaly detection, deep learning, data reconstruction and prediction, Internet of things, comprehensive survey. + +# I. INTRODUCTION + +An anomaly refers to an observation that significantly deviates from the expected behavior in a system, often appearing unusual, inconsistent, or unexpected [1]. 
Although outliers typically constitute only a small fraction of a dataset, they are often critical because they carry important information and can reveal key insights during analysis. Consequently, anomaly detection (AD) is the process of identifying such anomalous observations using various methods and algorithms, which helps decision-makers better understand data patterns and behaviors.

The rapid development of the Internet of Things (IoT) has revolutionized the way data is generated, collected, and analyzed across various domains. IoT systems leverage a wide array of interconnected sensors and devices to collect massive amounts of real-time data in diverse applications, including smart cities [2], industrial automation [3], healthcare [4], and transportation [5]. This proliferation of sensor data introduces unprecedented opportunities for enhancing operational efficiency and decision-making processes. However, it also presents significant challenges, as the data is often high-dimensional, noisy, and prone to anomalies caused by faulty sensors, environmental changes, or malicious attacks [6]. Detecting anomalies in data is critical for ensuring system reliability, security, and performance [7].

H. Huang, P. Wang and S. Alexanian are with the Lassonde School of Engineering, York University, Toronto, ON M3J 1P3, Canada (e-mail: joycehhq@yorku.ca; pingw@yorku.ca; yu263319@my.yorku.ca). J. Pei is with the State Key Laboratory of Advanced Electromagnetic Technology, School of Electrical and Electronic Engineering, Huazhong University of Science and Technology, Wuhan 430074, China (e-mail: jianhuapei@hust.edu.cn). J. Wang and D. Niyato are with the School of Computer Science and Engineering, Nanyang Technological University, Singapore (e-mail: jcwang_cq@foxmail.com; dniyato@ntu.edu.sg).

AD methodologies can be systematically classified according to various criteria. One prominent classification framework differentiates these methods into supervised, semi-supervised, and unsupervised approaches, predicated on the availability and nature of labeled data [8]. Supervised learning-based AD algorithms necessitate a fully labeled dataset, where each data point is explicitly annotated as either normal or anomalous. This labeling process facilitates the model's ability to discern and learn the underlying characteristics that differentiate anomalous instances from normal ones, thereby enhancing its detection accuracy. Semi-supervised learning-based methods, on the other hand, operate with a dataset comprising a substantial volume of unlabeled data alongside a smaller subset of labeled instances. These labels may include both normal and anomalous data or, in certain cases, solely normal instances [9]. In scenarios where only normal data is labeled, the semi-supervised approach converges towards unsupervised methodologies, as the model predominantly learns normal behavior patterns and identifies anomalies as deviations from these learned patterns. Unsupervised learning-based AD methods eschew the need for labeled data entirely, leveraging the intrinsic structural properties of the dataset to autonomously identify anomalies [10], [11]. In practical applications, a significant portion of contemporary AD research gravitates towards unsupervised methods [12].
This preference is largely driven by the substantial imbalance between the number of normal instances and anomalies, which complicates the acquisition of a sufficiently large labeled dataset required for effective supervised learning [13]. Moreover, anomalies are frequently correlated with critical failures or hazardous events, rendering the labeling process both costly and logistically challenging. Another key classification criterion is the nature of the dataset, particularly whether it comprises time-series data, which distinguishes AD methods into time-series [14] and non-temporal approaches. The applications of time-series and non-temporal AD will be discussed in detail in Section III.

In addition to the temporal aspect, AD techniques can also be categorized based on their underlying paradigms: traditional methods and deep learning-based methods. Traditional techniques encompass statistical approaches [15], distance-based methods [16], and clustering algorithms [17]. These approaches generally rely on estimating the probability distribution of normal data to detect anomalies. However, since the early 21st century, the fields of data science, machine learning, deep learning, and artificial intelligence have witnessed exponential growth, with significant implications for AD [18]. Particularly in recent years, the advent of soft-computing techniques has significantly influenced the development of deep learning-based methods. These techniques are characterized by their ability to handle imprecise, uncertain, and nonlinear data, making them highly suitable for applications involving deep learning. Consequently, deep learning-based methods have been propelled to the forefront due to their superior capability to learn expressive representations of complex data, including high-dimensional, temporal, spatial, and graph-structured data [19]. By proficiently modeling intricate patterns and relationships inherent in the data, deep learning approaches have proven remarkably effective in identifying anomalies across a wide range of challenging and complex datasets. This paper concentrates specifically on AD methods based on deep learning models, with the objective of providing a comprehensive review of this rapidly evolving field.

# A. Contrasting Traditional Models with Deep Learning Models

Traditional AD methods [20], such as statistical techniques, clustering algorithms [21], and Principal Component Analysis (PCA) [22], have long been established as reliable tools across a wide spectrum of applications due to their simplicity, interpretability, and low computational overhead. These characteristics make them particularly appealing in scenarios where model transparency and efficiency are paramount. Statistical techniques, for example, provide clear, rule-based mechanisms for detecting anomalies, while clustering algorithms are effective in grouping similar data points and isolating outliers in relatively low-dimensional datasets. Similarly, PCA has been widely adopted for dimensionality reduction, enabling effective AD by isolating principal components that capture major variations in the data [17]. Despite these advantages, traditional methods often encounter significant limitations when applied to modern, complex datasets. Statistical techniques generally assume that data adheres to specific distributions. However, this assumption is rarely met in real-world scenarios, where data often exhibits non-Gaussian distributions and heavy tails.
Clustering-based methods, while useful in many contexts, struggle to accurately define clusters, particularly when anomalies do not present clear separability from normal data. PCA, on the other hand, relies heavily on the assumption of linearity and extensive feature engineering, making it less effective at capturing the nuanced, non-linear patterns prevalent in high-dimensional datasets [22]. These constraints have prompted a shift towards more advanced approaches capable of handling the increasing complexity of modern data.

In contrast, deep learning models have recently emerged as a powerful alternative, addressing many of the shortcomings inherent in traditional approaches. Deep neural networks (DNNs) possess the capacity to autonomously learn complex patterns and hierarchical representations from raw data, thereby obviating the need for labor-intensive feature engineering [23]. This characteristic is particularly advantageous in the detection of subtle and multifaceted anomalies that might elude traditional methods [24]. By leveraging their multilayered architectures, deep learning models excel in processing high-dimensional and unstructured data, such as images, videos, and text, which are often challenging for conventional methods to handle effectively [25]. These models are adept at capturing non-linear relationships and interactions within the data, offering a more flexible and robust framework for AD [26]. Consequently, there has been a significant shift away from purely traditional AD techniques towards the adoption of deep learning methodologies.

![](images/01a5c0345962b2f97cd0ef64d890f1c98d4af1b99d38c5a98823b19ce41cfe61.jpg)
Fig. 1. The anatomy of this survey.

Nonetheless, it is crucial to acknowledge that traditional AD models retain certain advantages, notably in their simplicity, interpretability, and lower computational overheads [27]. These characteristics make them particularly appealing in scenarios where model transparency and computational efficiency are crucial. In recognition of these strengths, Section V of this paper will introduce and discuss various existing approaches that integrate traditional methods with deep learning techniques. These hybrid methods aim to leverage the strengths of both paradigms, resulting in more robust and efficient AD systems.

# B. Comparison With Existing Surveys

In recent years, the field of AD has seen a surge in research, particularly with the advent of deep learning methods. Numerous surveys have been published, each attempting to provide a comprehensive overview of the field. However, many of these surveys focus on broader historical developments or cover deep learning techniques only up to a certain point in time. For example, surveys such as [19], [28], [29], and [23] primarily cover techniques developed up to 2020. While these surveys are valuable, they do not reflect the most recent advancements in the field. Furthermore, specific models such as Generative Adversarial Network (GAN)-based AD have been explored in depth by studies [30], [31], [32], [33], and [34]. However, these studies primarily address foundational approaches and lack coverage of advanced techniques like conditional GANs, cycle-consistent GANs, and GANs integrated with self-supervised learning. Emerging hybrid models, combining GANs with Variational Autoencoders (VAEs) or autoencoders for improved robustness, are also underrepresented.
In contrast, our survey covers the literature from 2019 to 2024, providing a timely and comprehensive overview of the latest advancements. By focusing on recent trends and evolving techniques, including enhanced architectures and hybrid frameworks, our work offers a more current perspective, bridging existing gaps and guiding future research directions in AD.

# C. Contributions and Structure

This survey systematically reviews over 160 recent research papers on AD, including publications from leading journals (IEEE, ACM, Springer, Elsevier) and top-tier conferences (AAAI, CCS, ICCV) spanning from 2019 to 2024. By focusing on cutting-edge advancements in deep learning-based methods, this survey ensures a comprehensive and up-to-date overview of the field. The contributions of this survey are summarized as follows:

- This survey addresses gaps in prior surveys by highlighting advanced techniques that were previously underexplored, including conditional GANs, cycle-consistent GANs, and hybrid frameworks combining GANs with VAEs. These models are introduced and analyzed to demonstrate their strengths and weaknesses.

- This survey provides a detailed comparison of reconstruction-based and prediction-based methods. To enhance clarity and usability, we summarize key strengths, weaknesses, and applications in structured tables, offering readers insights into the trade-offs of different models.

- Recognizing the strengths of traditional methods, this survey explores their integration with deep learning models. Hybrid approaches, such as clustering, normalizing flows, and support vector data descriptions combined with deep learning, are analyzed to address complex challenges in AD.

The organization of this survey is shown in Fig.1. Section II provides an overview of data characteristics and anomaly types, followed by a discussion of common data processing challenges and mitigation strategies critical to effective AD. Section III explores the related applications of AD. Section IV categorizes and analyzes deep learning methods for AD, highlighting their effectiveness and limitations. Section V discusses the integration of traditional methods with deep learning, including clustering methods, normalizing flows, and support vector data descriptions. Section VI highlights open issues and future directions, such as challenges in data collection, computational complexity, explainability, and handling diverse anomaly types. Finally, Section VII concludes the survey with a summary and potential directions for future research.

# II. DATA CHARACTERISTICS AND CHALLENGES

# A. Overview of Input Data and Anomaly Types

In AD, input data presents unique challenges due to its structure, dimensionality, and temporal nature. Different types of data require specialized techniques to effectively identify anomalies, and the nature of anomalies themselves can vary greatly depending on the domain and data format [28]. For instance, visual data such as images and videos may exhibit anomalies associated with spatial or temporal inconsistencies, while time series data often involves anomalies related to trends or sudden changes in values over time. To better understand these variations, we first categorize data into textual, audio, image, video, and tabular formats, highlighting their respective characteristics and the challenges they pose for AD.
Beyond this classification, data can also be viewed through the lens of temporal dependencies, distinguishing between time-series data, which captures sequential patterns over time, and non-temporal data, where observations are independent of temporal order. This dual perspective provides a comprehensive framework for analyzing how different types of anomalies manifest across various data formats. Furthermore, the nature of anomalies themselves can vary depending on the data format. Point anomalies, sequence anomalies, and outliers may all manifest differently across different data types and structures. Understanding these distinctions is essential for selecting the appropriate AD techniques [29], as a deep understanding of data characteristics and anomaly types ensures that detection methods are effectively tailored to capture the specific behaviors and patterns indicative of anomalies.

# 1) Categorization by Data Type:

- Textual Data: Textual data consists of sequences of discrete symbols, such as characters, words, or phrases, structured in a linear format. Unlike other data types, textual data conveys information through syntactic and semantic relationships. It can be found in various forms, including documents, chat messages, emails, and system logs. Anomalies in textual data may appear as irregular word sequences, syntactic inconsistencies, missing or misplaced words, or semantically incoherent phrases.

- Audio Data: Audio data captures variations in amplitude and frequency over time, representing spoken language, environmental sounds, or machine signals. It can be stored as waveforms or transformed into frequency-domain representations like spectrograms. Unlike textual data, audio data is continuous and often requires spectral analysis to extract meaningful patterns. Anomalies in audio data manifest as unexpected distortions, unusual frequency shifts, missing segments, or abnormal sound patterns caused by malfunctioning equipment, altered speech, or environmental noise.

- Image Data: Image data consists of two-dimensional pixel grids, where each pixel represents intensity or color information. Unlike sequential data, image data encodes spatial relationships, capturing textures, shapes, and patterns. Image anomalies often appear as distortions, irregular textures, missing components, or unexpected objects that deviate from normal patterns. For instance, these can result from manufacturing defects, medical imaging errors, or environmental changes in satellite imagery.

- Video Data: Video data extends image data by incorporating a temporal dimension, forming sequences of frames over time. Each frame within a video is an image, and the relationships between frames capture motion and dynamic interactions [35]. Unlike static images, video data requires modeling temporal dependencies, making AD more complex. Anomalies in video data include irregular movements, unexpected scene transitions, or unusual object behaviors, which are commonly observed in surveillance footage, traffic monitoring, and activity recognition.

- Tabular Data: Tabular data consists of structured records organized in rows and columns, where each row represents an entity or event, and each column corresponds to an attribute. This type of data is widely used in databases, spreadsheets, financial records, and sensor logs. Unlike the other data types, tabular data can contain numerical, categorical, or mixed-format information.
Anomalies in tabular data include missing values, unexpected categorical labels, numerical outliers, or inconsistent relationships between attributes.

# 2) Categorization by Temporal Characteristics:

- Time-based data: Time-based data can be represented as a sequence of observations recorded over time, and it may consist of either a single variable (univariate) or multiple variables (multivariate). We can generalize the representation of both univariate and multivariate time series using the following formula: $X = \{x_{t,j}\}_{t\in T,j\in J}$, where $t\in T$ denotes the time index, with $t$ representing a specific time step and $T$ being the set of all time steps in the dataset. Similarly, $j\in J$ represents the dimension or variable index, where $j$ refers to a particular variable and $J$ is the set of all variables or dimensions in the data. When $|J| = 1$, the series is univariate, meaning there is only one variable observed over time. In contrast, when $|J| > 1$, the series is multivariate, indicating that multiple variables are recorded simultaneously at each time step. Each observation $x_{t,j}$ corresponds to the value of the $j$-th variable at time $t$ (a short illustrative sketch follows the list of anomaly types below). Among the five previously introduced data types, audio, video, and certain types of textual and tabular data are inherently time-based. Audio data is naturally sequential, with sound signals evolving over time, making anomalies such as distortions or frequency shifts dependent on temporal patterns. Video data extends image sequences over time, requiring the detection of abnormal object movements, scene transitions, or motion inconsistencies. Textual data, such as streaming logs, system event records, or chat conversations, also exhibits temporal dependencies, where anomalies may appear as unexpected event sequences or irregular timing between log entries. Similarly, tabular data in the form of financial transactions, sensor readings, or stock prices follows a time-series format, where anomalies may indicate fraud, equipment failure, or unusual market behaviors.

- Non-temporal data: Non-temporal data refers to observations that lack a temporal sequence, where the relationships between data points are independent of time. Such data is prevalent across industries that rely on static datasets or event-based observations. AD in non-temporal data focuses on identifying irregularities by analyzing data characteristics, patterns, or statistical properties rather than temporal dependencies. This process is crucial for uncovering hidden risks, fraudulent activities, or system malfunctions in contexts where time is not a defining factor. Among the five data types, image and certain types of tabular data are the most common forms of non-temporal data. Image data, such as medical scans, industrial defect detection images, or satellite photos, captures spatial relationships but does not depend on a temporal sequence. Anomalies in such data typically appear as unusual textures, distortions, or unexpected objects. Tabular data, when not used for time-series analysis, is also non-temporal, such as customer records, product attributes, or static financial datasets. In these cases, AD focuses on identifying outliers, inconsistencies, or unusual relationships between different features rather than changes over time.

# 3) Types of Anomalies:

- Point Anomalies: A single data point deviates significantly from the expected behavior in the dataset. These are common across both time-based and non-time-based data, representing sudden outliers or unusual values.

- Contextual Anomalies: A data point is considered anomalous only when it is analyzed within a specific context or surrounding data. In time-based data, this could involve seasonal trends or time-of-day variations, whereas in non-time-based data, it could depend on relationships between variables.

- Subsequence Anomalies: A contiguous sequence of data points behaves abnormally, typically found in time series data. These anomalies are significant when the temporal order of data points plays a key role in detecting deviations from expected patterns.

- Cluster-based and Correlation Anomalies: Anomalies that occur when a group of data points, or relationships between variables, deviate from expected patterns. This is more prominent in non-time-based data, where detecting irregular clusters or correlations between features is essential for AD.
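
To make the notation $X = \{x_{t,j}\}$ and the anomaly types above concrete, the following minimal Python sketch builds a small multivariate series, injects a point anomaly and a subsequence anomaly, and flags the former with a simple z-score rule. The series, thresholds, and window size are illustrative assumptions, not values taken from any cited work.

```python
import numpy as np

rng = np.random.default_rng(0)

# Multivariate series X = {x_{t,j}}: T = 200 time steps, |J| = 2 variables.
T, J = 200, 2
X = rng.normal(loc=0.0, scale=1.0, size=(T, J))

X[50, 0] += 8.0          # point anomaly: one extreme value in variable 0
X[120:130, 1] += 3.0     # subsequence anomaly: a shifted contiguous window

# A per-variable z-score rule catches the point anomaly; the milder
# subsequence anomaly typically needs window-based (temporal) scoring.
z = np.abs(X - X.mean(axis=0)) / X.std(axis=0)
print("point anomalies (t, j):", np.argwhere(z > 5.0))

# Window score: mean absolute deviation over a sliding window, per variable.
w = 10
window_score = np.array([np.abs(X[t:t + w]).mean(axis=0) for t in range(T - w)])
print("most anomalous window on variable 1 starts at t =",
      window_score[:, 1].argmax())
```
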

# B. Data Processing

Effective AD requires careful preparation and preprocessing of input data to ensure that detection algorithms can operate effectively. In many cases, raw data contains inherent challenges that can significantly hinder the performance of AD models. These challenges arise from the complexity of real-world data, including high dimensionality, missing or sparse values, skewed class distributions, and noise that can obscure true anomalies. Without addressing these issues, AD methods may struggle to accurately identify rare or subtle deviations in the data, leading to false positives, missed anomalies, or inefficient computations. Therefore, appropriate data preprocessing steps are crucial for improving detection accuracy, robustness, and overall system reliability. This subsection outlines some of the most common data processing issues and their implications for AD, along with strategies to mitigate these challenges.

1) Dimensionality: High-dimensional data makes AD more complex due to the "curse of dimensionality". As datasets expand in size and complexity, particularly with the rise of "big data" characterized by large-scale, high-velocity data generated from diverse sources, it becomes increasingly difficult for AD methods to maintain accuracy [36]. To address this issue, dimensionality reduction is a common approach that transforms a large set of input features into a smaller, more focused feature set [37]. While traditional methods such as PCA [38] are frequently used, they may struggle to capture nonlinear relationships in complex data. For instance, Sakurada et al. [39] compare autoencoders (AEs), which perform non-linear dimensionality reduction, with linear PCA and kernel PCA on both synthetic and real-world datasets. The study reveals that on the nonlinear and high-dimensional synthetic Lorenz dataset, the AE achieved a relative AUC improvement of $26.83\%$ compared to linear PCA. This highlights that autoencoders can even detect anomalies in data with relatively high intrinsic dimensionality, where linear PCA struggles to perform.
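
To make the linear baseline in this comparison concrete, the sketch below scores anomalies by PCA reconstruction error: points that stray from the principal subspace reconstruct poorly. It is a generic scikit-learn illustration, not the setup of [39]; the synthetic data, component count, and 99th-percentile threshold are arbitrary choices.

```python
import numpy as np
from sklearn.decomposition import PCA

rng = np.random.default_rng(1)

# Normal data lies near a 3-D linear subspace of a 20-D space.
basis = rng.normal(size=(3, 20))
X_train = rng.normal(size=(500, 3)) @ basis + 0.05 * rng.normal(size=(500, 20))
X_test = np.vstack([
    rng.normal(size=(5, 3)) @ basis,   # normal test points
    rng.normal(size=(5, 20)) * 2.0,    # off-subspace anomalies
])

pca = PCA(n_components=3).fit(X_train)

def recon_error(X):
    # Project onto the principal subspace and back; large residual = anomalous.
    return ((X - pca.inverse_transform(pca.transform(X))) ** 2).sum(axis=1)

# Calibrate the threshold on training reconstructions (99th percentile).
threshold = np.quantile(recon_error(X_train), 0.99)
print("anomaly flags:", recon_error(X_test) > threshold)
```
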

2) Sparsity: Sparse data, where many values are missing or incomplete, poses significant challenges for AD. Sparse datasets can lead to reduced detection accuracy, as missing or incomplete data points may obscure the underlying patterns necessary for detecting anomalies [36]. Cheng et al. [40] highlight that in high-dimensional settings, the sparsity problem is further amplified as the data becomes more spread out, increasing the risk of missing critical information that signals anomalies. To address these challenges, Li et al. [41] propose an improved low-rank and sparse decomposition model (LSDM) for hyperspectral AD. Their approach models sparse components as a mixture of Gaussians (MoG), effectively capturing anomalous patterns within complex datasets by leveraging the low-rank structure. In contrast, Han et al. [42] take a different approach by introducing sparse autoencoders to learn sparse latent representations from high-dimensional input data. Through experiments on three real-world cyber-physical system datasets, the study shows that mining sparse latent patterns from high-dimensional time series can significantly improve the robustness of AD models.

3) Class Imbalance: In most AD tasks, the occurrence of anomalies is significantly rarer than normal data points, resulting in a class imbalance problem. This imbalance can cause detection algorithms to be overly biased toward the majority class (normal data), leading to a higher rate of false negatives where critical anomalies are missed. In imbalanced datasets, it is often possible to achieve an overall high accuracy while the recall score for the minority class (anomalies) remains very low [43]. Traditional methods to mitigate this issue involve oversampling the minority class or undersampling the majority class [44]. Recent research has increasingly focused on introducing Data Generation Models (DGM) to improve the representation of the minority class in AD. For instance, Dlamini et al. [45] use Conditional Generative Adversarial Networks (CGANs) to generate synthetic samples for the minority class and combine this with KL divergence to guide the model in accurately learning the distribution of the minority class.

4) Noise in Data: Noise refers to random or irrelevant information present in the data, which can obscure true anomalies and lead to false positives. In addition, during the training process of AD models, the high complexity of the model and the presence of noisy data can lead to overfitting, where the model inadvertently learns to fit the reconstruction error from noisy inputs rather than focusing on genuine anomalies [46]. To reduce the impact of noisy data, Zhang et al. [47] incorporate a Maximum Mean Discrepancy (MMD) regularizer to encourage the distribution of low-dimensional representations to approximate a target distribution. The goal is to align the distribution of noisy data with that of normal training data, thereby reducing the risk of overfitting. Furthermore, Chen et al. [48] propose a novel method called Noise Modulated Adversarial Learning, where noise images from a predefined normal distribution are fed into the discriminator network as negative samples. This adversarial process modulates the training of the reconstruction network, balancing the learning between the two networks to improve robustness against noise.
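
The MMD quantity mentioned above can be illustrated in a few lines. The NumPy sketch below computes a biased estimate of the squared MMD between two samples under an RBF kernel; the bandwidth and toy data are arbitrary, and methods such as [47] would add this statistic to a training loss rather than print it.

```python
import numpy as np

def rbf_kernel(a, b, gamma=1.0):
    # k(x, y) = exp(-gamma * ||x - y||^2), evaluated for all pairs.
    d2 = ((a[:, None, :] - b[None, :, :]) ** 2).sum(-1)
    return np.exp(-gamma * d2)

def mmd2(x, y, gamma=1.0):
    # Biased estimate: mean k(x,x') + mean k(y,y') - 2 * mean k(x,y).
    return (rbf_kernel(x, x, gamma).mean()
            + rbf_kernel(y, y, gamma).mean()
            - 2.0 * rbf_kernel(x, y, gamma).mean())

rng = np.random.default_rng(2)
clean = rng.normal(0.0, 1.0, size=(256, 8))
noisy = rng.normal(0.5, 1.5, size=(256, 8))   # shifted, noisier sample
print("MMD^2(clean, clean'):", mmd2(clean, rng.normal(0.0, 1.0, (256, 8))))
print("MMD^2(clean, noisy): ", mmd2(clean, noisy))
```
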

5) Privacy of data: In many fields, such as healthcare, finance, and cybersecurity, data used for AD often contains sensitive or personal information. Ensuring the privacy and security of this data is paramount, as improper handling could lead to serious legal and ethical violations. Hassan et al. [49] conduct an in-depth investigation into the privacy of AD models in blockchain technology. To address these privacy concerns, Federated Learning (FL), a distributed machine learning paradigm, has emerged as a promising supplement to AD [50]. FL allows distributed clients to collaboratively train a shared model while protecting the privacy of their local data. For example, Idrissi et al. [51] propose Fed-ANIDS, which leverages FL to address the privacy issues associated with centralized Network Intrusion Detection Systems (NIDS). The model was evaluated in various settings and on popular datasets, demonstrating its ability to achieve high performance while preserving the privacy of distributed client data. Cui et al. [52] further introduce GANs into FL and design a new algorithm that injects controllable noise into local model parameters, ensuring both AD utility and compliance with differential privacy requirements.

# III. RELATED APPLICATIONS

With the rapid advancement of deep learning models, AD has become more efficient and adaptable. These sophisticated models have been widely applied across various domains, enhancing the ability to identify irregular patterns in complex and high-dimensional datasets. In the previous section, we categorized data based on temporal characteristics into time-series and non-time-series data. However, visual data presents unique challenges, detection requirements, and a wide range of applications, making it difficult to be strictly classified as either time-series or non-time-series data. It can be static (e.g., images) or dynamic (e.g., videos), where images are typically considered non-time-series data, while videos fall under time-series data. Visual data is extensively used in fields such as medical imaging, autonomous systems, and surveillance, where detecting anomalies requires specialized deep learning techniques that differ from traditional numerical or categorical data analysis. To better reflect its broad applications and distinct computational needs, we discuss visual data separately. Based on this classification, we will now explore the applications of deep learning in AD from three perspectives: time-series data, non-temporal data, and visual data.

# A. Applications in Time Series Data

Time series data, defined by its sequential nature over time, is fundamental to many systems where the temporal order of events critically influences analysis and decision-making processes. AD in time series data has become an indispensable technique across various industries, enabling the early detection of irregular patterns that may indicate underlying issues or emerging threats. The applications of time series AD are extensive, impacting critical areas such as traffic monitoring, power system management, and healthcare. In the following sections, we present how these applications leverage AD to enhance operational efficiency, ensure system reliability, and improve safety across these fields.

1) Traffic Monitoring: Time series AD plays a pivotal role in modern traffic management systems. As demonstrated in [53], real-time data from loop detection sensors are integrated and analyzed to predict traffic volume and enhance system safety. The ability to detect anomalies in traffic patterns is essential for anticipating and responding to potential incidents before they escalate. For instance, Li et al. [54] present a method that identifies traffic incidents by detecting anomalies in traffic time series data, thereby helping users avoid accidents and reduce travel time.
Furthermore, high-speed driving is identified as a significant contributor to traffic accidents [55]. By monitoring and analyzing sudden increases in vehicle speed, AD techniques can predict and prevent accidents more effectively, providing a critical tool for improving road safety. Zhao et al. [56] further validate the efficacy of unsupervised AD methods in assessing elevated road traffic accident risks, specifically by analyzing volume and speed data from traffic on the Yan'an elevated road. This approach enhances the ability to detect and respond to hazardous traffic conditions in real time, underscoring the indispensable role of AD in traffic management.

2) Power System: AD is a vital element in ensuring the stability, security, and reliability of electrical grids. By continuously monitoring grid data, these techniques can swiftly identify deviations from normal operational patterns, which may indicate issues such as natural faults or malicious cyberattacks. The ability to detect these anomalies in real time is crucial for preventing potential outages and maintaining a consistent power supply. For instance, Li et al. [57] highlight that accurate and real-time AD can enhance grid stability by over $20\%$, providing rapid response capabilities that significantly bolster the system's defense against both natural disruptions and cyber threats. Furthermore, the introduction of a residential electrical load AD framework, as demonstrated in [58], has been shown to significantly improve both load prediction accuracy and AD, thereby optimizing demand-side management (DSM) in residential areas. In terms of cybersecurity, the MENSA Intrusion Detection System (IDS) [59] has proven to be a formidable tool in smart grid environments, effectively detecting operational anomalies and classifying a wide range of cyberattacks. This capability not only protects critical infrastructure but also underscores the indispensable role of AD in modern power system management.

3) Healthcare: AD plays a crucial role in healthcare by enabling continuous monitoring of patient vital signs, such as heart rate and blood pressure, to swiftly identify abnormal conditions that may require urgent medical intervention. The application of AD in medical signal analysis is particularly important, as highlighted in [60], where the identification of data samples that deviate from the typical data distribution can reveal underlying issues such as noise, changes in a patient's condition, or the emergence of new and previously undetected medical conditions. This capability is essential for ensuring accurate diagnosis and timely patient care. Furthermore, Keeley et al. [61] demonstrate that AD algorithms can effectively identify irregularities in heart rate data, which not only facilitates faster emergency responses but also provides deeper insights into a patient's health status. This, in turn, enhances overall patient care while also reducing the cognitive load on healthcare professionals by automating the detection of potential issues.

# B. Applications in Non-temporal Data

AD in non-temporal data plays a critical role in ensuring operational integrity, security, and financial stability. By focusing on identifying irregularities within independent events or static datasets, it addresses potential risks such as fraud, system failures, and malicious activities. Unlike time-series applications, non-temporal AD leverages data patterns and statistical analysis to uncover deviations that signal anomalies.
In the following, we present specific applications across domains such as finance and cybersecurity, showcasing the significant impact of non-temporal AD on safeguarding critical systems and operations.

1) Finance: In the financial sector, non-temporal data AD is pivotal for identifying fraudulent transactions, credit scoring anomalies, and unusual trading activities. Unlike time series data, these financial fraud detection tasks often involve independent events, such as individual transactions or credit score evaluations, which do not rely on temporal sequences. Instead, the focus is on transaction characteristics and patterns that may indicate fraudulent behavior. Various data mining techniques, including SVM, Naïve Bayes, and Random Forest, are extensively employed to detect different forms of financial fraud, such as bank fraud, insurance fraud, financial statement fraud, and cryptocurrency fraud [62]. As highlighted by [63], AD is critical in quickly identifying activities that deviate from normal patterns, thereby enabling rapid intervention to minimize financial losses.

2) Cybersecurity: AD is a fundamental component of maintaining a secure and resilient cyberspace. As [64] points out, advanced security controls and resilience analysis are crucial during the early stages of system deployment to ensure long-term sustainability. AD plays a pivotal role in this process by identifying unauthorized access, malicious activities, and network intrusions that deviate from established norms. This capability is essential for safeguarding network security and preventing potential breaches. Early research in deep learning-based network intrusion detection focused on architectures such as Autoencoders (AE), Deep Belief Networks (DBN), and Recurrent Neural Networks (RNN) [24]. As deep learning technology has advanced, more sophisticated models have been developed for detecting anomalies in cybersecurity. For instance, Singh et al. [65] illustrate the benefits of AD in wide-area protection schemes (WAPS) by using a deep learning-based cyber-physical AD system (CPADS) to detect and mitigate data integrity and communication failure attacks in centralized Remedial Action Schemes (CRAS). Similarly, Nagarajan et al. [66] highlight the effectiveness of AD in enhancing the security of Cyber-Physical Systems (CPSs) by accurately identifying anomalous behaviors, thereby addressing the growing challenges posed by sophisticated cyberattacks and the increasing volume of data.

# C. Applications in Visual Data

AD in visual data, encompassing images and videos, plays a vital role in numerous industries where visual inspection is critical. Applications range from detecting defects in manufacturing processes to identifying medical abnormalities in imaging, monitoring public safety through surveillance systems, and ensuring quality control in production lines. By leveraging advanced deep learning techniques, AD methods can automatically identify and analyze irregularities with high precision, reducing reliance on manual inspection and improving efficiency. In this section, we explore key applications of visual data-based AD, highlighting its transformative impact across various domains.

1) Medical Imaging: AD in medical imaging is indispensable across numerous medical specialties, playing a crucial role in the early detection and diagnosis of diseases.
In radiology, it is employed to identify anomalies in X-rays [67], brain imaging [68], and CT scans [69], thereby aiding in the accurate diagnosis of various conditions. However, as [70] highlights, anomalies in medical images often closely resemble normal tissue, posing a significant challenge to detection due to their subtle differences. This similarity requires the use of sophisticated techniques to effectively distinguish between normal and anomalous data. For example, Draelos et al. [71] demonstrate the power of machine learning in radiology, significantly enhancing the classification performance for multiple abnormalities in chest CT volumes, achieving an AUROC greater than 0.90 for 18 different abnormalities. Additionally, Shvetsova et al. [72] showcase a novel method for AD in medical images, which dramatically improves the detection of subtle abnormalities in complex, high-resolution images such as chest X-rays and pathology slides, scenarios where traditional models often fail. Furthermore, Zhao et al. [73] introduce the SALAD framework, which enhances AD in medical images by utilizing self-supervised and translation-consistent features from normal data. This approach is particularly effective in situations where labeled anomalous images are scarce, thereby improving detection accuracy in challenging medical imaging tasks.

2) Video Monitoring: Video AD (VAD) has become increasingly crucial with the rise of large-scale multimedia data analysis, particularly in the processing of video data [74]. VAD focuses on identifying unusual patterns or behaviors in video footage that deviate from the norm, making it a vital tool in several domains. In security and surveillance, VAD is used to monitor public spaces, buildings, and secure areas, enabling the detection of suspicious activities, unauthorized access, and unusual crowd behaviors, thereby enhancing public safety [75]. In the realm of traffic monitoring, VAD facilitates the real-time identification of accidents and irregular traffic patterns, allowing for prompt response and management [76]. Additionally, VAD is applied in behavioral analysis to detect abnormal behaviors in various environments, such as schools, workplaces, and public transportation systems, contributing to the maintenance of safety and order. For example, Chen et al. [77] propose a bidirectional prediction framework specifically designed for AD in surveillance videos. This innovative approach employs forward and backward prediction subnetworks to predict the same target frame, constructing a loss function based on the real target frame and its bidirectional predictions. Experimental results demonstrate that this model outperforms existing approaches on various surveillance video datasets, including those featuring pedestrians and street scenes, showcasing its superior performance in accurately detecting anomalies in real-world surveillance scenarios.

# IV. DEEP LEARNING METHODS FOR ANOMALY DETECTION

The application of deep learning to AD has revolutionized the way we identify irregularities in both time-based and non-time-based datasets [78]. Traditional methods, such as statistical analysis and clustering, have been commonly used to detect anomalies. However, these methods often struggle with high-dimensional data, complex relationships, and capturing intricate patterns. Deep learning models, with their ability to learn hierarchical representations and detect subtle anomalies, have emerged as powerful tools to overcome these limitations.

As shown in Fig.2, this section introduces three major deep learning approaches applied to AD: reconstruction-based methods, prediction-based methods, and hybrid approaches. Each approach leverages the strengths of deep learning in distinct ways to improve AD accuracy, particularly in scenarios where data patterns are complex, unstructured, or temporal.

# A. Deep learning methods for Anomaly Detection based on Reconstruction

Reconstruction-based approaches operate by training a model to learn the underlying distribution of normal data [79]. Once trained, the model attempts to reconstruct incoming data. The reconstruction error, which is the difference between the original data and its reconstruction, is then used as an indicator of anomaly. A high reconstruction error suggests that the data is anomalous, as it deviates from the learned normal patterns. Deep learning-based reconstructive models have become prominent due to their ability to capture complex patterns in high-dimensional data. In recent years, most reconstruction-based AD models have been developed using techniques such as GANs, AEs, and diffusion models. These models each have unique strengths and weaknesses, as summarized in Table I. This table consolidates insights from multiple studies, including [80], [81], [82], and [83], which have analyzed the advantages and limitations of GANs, VAEs, and Diffusion Models in AD. In this section, we introduce these three types of models in the context of AD and discuss their various variants.

1) GAN-based Anomaly Detection: GANs are powerful tools for generating synthetic data that resembles a given training dataset [84]. As shown in the upper part of Fig.3, GANs consist of two main components: a generator and a discriminator, both of which are neural networks. Because of this structure, GAN models are highly flexible, allowing for different networks to be chosen as the generator and discriminator based on the specific task. This flexibility makes GANs a versatile framework for a wide range of applications. The generator $G$ takes a random noise vector $z$ (usually sampled from a Gaussian distribution) as input and generates synthetic data $G(z)$. The discriminator $D$ receives a data sample (either from the real dataset or from the generator) as input and outputs a probability $D(x)$, representing the likelihood that the input is real (i.e., from the actual dataset) rather than fake (i.e., generated by the generator). The generator and discriminator are trained simultaneously through a process where the generator tries to produce data that can fool the discriminator, and the discriminator tries to improve its ability to distinguish between real and fake data.

![](images/0582fc6497eafabed1bd3451b1290a6b09ece498ffa50d1156dd7f0120e507ef.jpg)

![](images/880d55c7ec7dc69d5f5d9f1f2b32b3c2153e316b56c5560359f5852c0d17b74d.jpg)

![](images/ab7aedd616ec4505c3fc87c94f267fe1e3ff25684d846743f04d6cdec18a9037.jpg)
Fig. 2. Three types of anomaly detection: (a) Reconstruction-based approach, (b) Prediction-based approach, (c) Hybrid method.

![](images/7509a938836ca50d09aa01364c29c0e376376912d0ca6818c1dc0151a063308a.jpg)
Fig. 3. Structural Frameworks for GAN Anomaly Detection.

Table II provides a comprehensive summary of recent GAN-based AD models, categorizing them based on their techniques, approaches, strengths, and weaknesses.
This table highlights how different GAN variants are tailored for specific AD tasks, along with the types of data they are applied to and their publication years.

The training process of GANs can be described as a minimax game with the following objective function:

$$
\min_{G} \max_{D} V(D, G) = \mathbb{E}_{x \sim p_{data}(x)} [\log D(x)] + \mathbb{E}_{z \sim p_{z}(z)} [\log(1 - D(G(z)))]. \tag{1}
$$

In this function, $p_{data}(x)$ represents the distribution of the real data, $p_z(z)$ represents the distribution of the noise vector $z$, $G(z)$ is the data generated by the generator, and $D(x)$ is the probability that $x$ is real.
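
As a deliberately minimal illustration of Eq. (1), the PyTorch sketch below alternates discriminator and generator updates on toy 2-D data. The tiny MLPs, optimizer settings, and the common non-saturating generator loss (maximizing $\log D(G(z))$ rather than minimizing $\log(1 - D(G(z)))$) are illustrative choices, not an implementation from any surveyed paper.

```python
import torch
import torch.nn as nn

torch.manual_seed(0)
# Tiny MLPs standing in for the generator G and discriminator D of Eq. (1).
G = nn.Sequential(nn.Linear(8, 32), nn.ReLU(), nn.Linear(32, 2))
D = nn.Sequential(nn.Linear(2, 32), nn.ReLU(), nn.Linear(32, 1), nn.Sigmoid())
opt_g = torch.optim.Adam(G.parameters(), lr=1e-3)
opt_d = torch.optim.Adam(D.parameters(), lr=1e-3)
bce = nn.BCELoss()

for step in range(200):
    x_real = torch.randn(64, 2) * 0.5 + 2.0   # stand-in for normal data
    fake = G(torch.randn(64, 8))              # G(z), with z ~ p_z

    # Discriminator update: push D(x_real) toward 1 and D(G(z)) toward 0.
    opt_d.zero_grad()
    d_loss = (bce(D(x_real), torch.ones(64, 1))
              + bce(D(fake.detach()), torch.zeros(64, 1)))
    d_loss.backward()
    opt_d.step()

    # Generator update (non-saturating variant): push D(G(z)) toward 1.
    opt_g.zero_grad()
    g_loss = bce(D(fake), torch.ones(64, 1))
    g_loss.backward()
    opt_g.step()

print(f"d_loss={d_loss.item():.3f}  g_loss={g_loss.item():.3f}")
```
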

TABLE I: COMPARISON OF GANS, VAES, AND DIFFUSION MODELS IN ANOMALY DETECTION

| Model | Strengths | Weaknesses |
| --- | --- | --- |
| GANs | • Capable of generating high-fidelity, realistic samples. • Learns complex data distributions using adversarial loss. • Useful in AD by distinguishing real vs. generated data. | • Prone to mode collapse, leading to low sample diversity. • Hard to train, with difficult-to-interpret losses. • Training is unstable and hard to converge. |
| VAEs | • Easy to train with one tractable likelihood loss. • Provides high sample diversity by covering all data modes. • Latent space representation is useful for AD tasks. | • Produces low-fidelity, often blurry samples. • Pixel-based loss leads to sample ambiguity and blurriness. |
| Diffusion Models | • Generates high-fidelity samples with gradual refinement. • High sample diversity due to likelihood maximization. • Intermediate noisy images serve as useful latent codes for AD. | • Slow sample generation due to the multi-step denoising process. • Computationally intensive, requiring many steps for both forward and reverse diffusion. |

The generator $G$ aims to minimize this objective, while the discriminator $D$ aims to maximize it. The discriminator updates its weights to maximize the probability of correctly classifying real and generated data, while the generator updates its weights to minimize the discriminator's ability to distinguish between real and fake data.

In the context of AD, GANs play crucial roles in both representation learning and data augmentation, each serving distinct purposes within deep learning [85]. In representation learning, the primary objective of GANs is to learn and model the underlying distribution of the data, enabling the generation of synthetic data that closely resembles real data. This process involves a generator that creates fake data from random noise and a discriminator that distinguishes between real and fake data. Through iterative training, the generator improves its ability to produce realistic data, which is particularly useful in tasks like AD. For example, in [86], GANs are used for representation learning by generating fake data that matches the distribution of normal data. This generated data is then used to train a VAE to detect anomalies through reconstruction errors. Similarly, in [87], a fault-attention generative probabilistic adversarial autoencoder (FGPAA) is proposed, combining GANs and autoencoders for AD by learning the low-dimensional manifold of healthy-state data. The GAN component aids in feature representation learning, reducing signal information loss and enhancing the model's ability to detect anomalies through distribution probability and reconstruction error.

There are two main structures for using GANs in AD, as shown in Fig.3. The first approach is based on the generator, as depicted in the lower part of Fig.3, highlighted by the yellow box. The basic idea is to train the GAN on normal data and then use the reconstruction error to identify anomalies. During the training phase, the GAN is trained exclusively on normal data, allowing the generator to learn to produce data that closely mimics the normal data distribution. During the detection phase, a test data point $x$ is fed into the generator to obtain the reconstructed data $G(x)$. The reconstruction error, typically measured as the difference between the original data point $x$ and the reconstructed data $G(x)$, is then used to detect anomalies. This can be quantified using metrics such as the mean squared error (MSE). If the reconstruction error exceeds a predefined threshold, the data point is classified as an anomaly. The intuition behind this approach is that the generator, trained solely on normal data, will struggle to accurately reconstruct anomalous data, resulting in a high reconstruction error.

The mathematical representation for AD using GANs involves computing the reconstruction error $E(x)$ as follows:

$$
E(x) = \| x - G(x) \|^{2}, \tag{2}
$$

where $\| \cdot \|^2$ denotes the squared Euclidean distance. A threshold $\tau$ is set, and if $E(x) > \tau$, the data point $x$ is considered an anomaly. For example, Dong et al. [88] propose a semi-supervised approach for video AD using a dual discriminator-based GAN structure, focusing on representation learning. In this approach, the generator predicts future frames for normal events, and anomalies are detected by evaluating the quality of these predictions. Similarly, Guo et al. [89] introduce RegraphGAN, a graph generative adversarial network specifically designed for dynamic graph AD. RegraphGAN utilizes GAN-based representation learning to encode complex spatiotemporal relationships in graph data, allowing it to better capture anomalies. By leveraging encoders to project input samples into a latent space and integrating GANs to enhance both training stability and efficiency, RegraphGAN significantly improves AD performance over existing methods.
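
Eq. (2) translates directly into a short scoring routine. In the sketch below, `model` is a placeholder for any trained reconstruction network (a GAN generator paired with an inversion step, or an autoencoder), and calibrating $\tau$ as a high quantile of scores on held-out normal data is one common heuristic rather than a prescribed rule; the untrained stub is there only so the snippet runs.

```python
import torch

@torch.no_grad()
def anomaly_scores(model, x):
    """Per-sample E(x) = ||x - G(x)||^2 from Eq. (2)."""
    recon = model(x)
    return ((x - recon) ** 2).flatten(1).sum(dim=1)

def fit_threshold(model, x_normal, q=0.99):
    # Calibrate tau on held-out normal data: flag the worst (1 - q) fraction.
    return torch.quantile(anomaly_scores(model, x_normal), q)

# Usage with any reconstruction model; an untrained stand-in for illustration.
model = torch.nn.Sequential(torch.nn.Linear(2, 2))
tau = fit_threshold(model, torch.randn(512, 2))
x_test = torch.randn(8, 2) * 4.0
print("anomaly flags:", anomaly_scores(model, x_test) > tau)
```
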
RegraphGAN utilizes GAN-based representation learning to encode complex spatiotemporal relationships in graph data, allowing it to better capture anomalies. By leveraging encoders to project input samples into a latent space and integrating GANs to enhance both training stability and efficiency, RegraphGAN significantly improves AD performance over existing methods.

The second approach leverages the discriminator, highlighted by the green box in Fig. 3. A well-trained discriminator has the ability to differentiate between real (normal) and fake (anomalous) samples. During the detection phase, test samples are directly input to the discriminator, which evaluates the likelihood that a given sample is real. If the discriminator assigns a low probability to a sample, suggesting that it is likely fake or anomalous, the sample is flagged as an anomaly. This method relies on the discriminator's capacity to recognize deviations from the normal data distribution it learned during training. For instance, Liu et al. [90] propose a GAN framework that uses multiple generators to produce potential outliers, which are then distinguished from normal data by a discriminator to detect anomalies. The discriminator's output score is used to evaluate the anomaly degree of input data, providing a comprehensive reference distribution and preventing mode collapse.

Additionally, GANs are highly effective in data augmentation, helping to mitigate the scarcity of anomaly samples, which often results in data imbalance and poor generalization [91]. When anomaly samples are unevenly distributed or lacking in diversity, models struggle to learn rare anomalies and can overfit to the training set, reducing their accuracy on unseen data. Traditional data augmentation techniques, such as scaling, rotation, random cropping, translation, flipping, and copy-paste, attempt to mitigate these issues. However, simple linear transformations fail to capture new distributions and features of unknown anomalies, such as random changes in shape or texture. This is where GANs provide a significant advantage. By generating synthetic anomaly data that mimics the distribution of real-world anomalies, GANs enable models to learn a more diverse set of anomaly features. This not only addresses the imbalance problem but also improves the model's generalization capabilities, as it learns to detect anomalies based on a broader range of characteristics beyond those present in the original training dataset. Miao et al. [92] introduce an unsupervised AD framework that uses data augmentation through contrastive learning and GANs to mitigate overfitting. By employing a geometric distribution mask, it enhances data diversity and generates synthetic anomaly samples, addressing the scarcity of anomaly data. In [93], Anomaly-GAN addresses data augmentation by using a mask pool, anomaly-aware loss, and local-global discriminators to generate high-quality, realistic synthetic anomalies with diverse shapes, angles, spatial locations, and quantities in a controllable manner. Li et al. [94] propose an augmented time-regularized generative adversarial network that combines an augmented filter layer and a novel temporal distance metric to generate high-quality and diverse artificial data, addressing the limitations of existing GAN approaches in handling limited training data and temporal order.
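To make the reconstruction-error criterion of Eq. (2) concrete, the following minimal PyTorch sketch scores test points with a generator trained only on normal data. The `Generator` architecture, feature dimension, and threshold value are illustrative assumptions, not taken from any cited paper:

```python
# Minimal sketch of generator-based GAN anomaly scoring (Eq. (2)).
# Assumes a generator G already trained on normal data only; the
# architecture, feature size, and threshold below are illustrative.
import torch
import torch.nn as nn

class Generator(nn.Module):
    """Toy encoder-decoder generator: maps x to a reconstruction G(x)."""
    def __init__(self, dim=32, latent=8):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(dim, latent), nn.ReLU(),
            nn.Linear(latent, dim),
        )

    def forward(self, x):
        return self.net(x)

@torch.no_grad()
def anomaly_scores(G, x):
    """E(x) = ||x - G(x)||^2, computed per sample."""
    return ((x - G(x)) ** 2).sum(dim=1)

G = Generator()                 # in practice: load weights trained on normal data
x_test = torch.randn(16, 32)    # batch of test points
scores = anomaly_scores(G, x_test)
tau = 1.0                       # threshold chosen on validation data
is_anomaly = scores > tau       # flag points with E(x) > tau
```

In practice the threshold $\tau$ is typically calibrated on a held-out set of normal data, e.g., as a high percentile of the normal reconstruction errors.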
2) AE-based Anomaly Detection: In recent years, the limitations of traditional AE models in handling complex and noisy data have become more apparent, leading to the development of enhanced methods to improve their performance in AD tasks. For example, Fan et al. [97] introduce a new framework that incorporates the $\ell_{2,1}$-norm into the AE, and experiments have demonstrated that this framework can significantly improve AD accuracy by increasing the model's robustness to noise and outliers during training. Wang et al. [98] demonstrate that introducing an adaptive-weighted loss function can effectively suppress anomaly reconstruction, thereby improving the accuracy of AD. Liu et al. [99] introduce a multi-scale convolutional AE architecture, where multiple stacked convolutional encoder-decoder layers act as background learners to robustly eliminate anomalies of varying sizes during background reconstruction. Additionally, Lin et al. [100] introduce a soft calibration strategy combined with an AE to address the issue of data contamination in AD.

VAEs are another generative model widely used in AD tasks. Like GANs, VAEs aim to learn the distribution of normal data to identify anomalies. However, unlike GANs, which rely on adversarial training between a generator and a discriminator, VAEs use an encoder-decoder architecture. Fig. 4 illustrates the structure of AD based on a VAE. The goal of a VAE is to map the input data into a latent space through the encoder and model the data distribution probabilistically within this space. This approach allows the VAE to generate new data that closely resembles the true data distribution, and anomalies can be detected by evaluating the reconstruction error.

![](images/fd57f4067bb6f3c6e9b25b5ccfc25170dec2d8afbbeb522aa5c35f6a09e5a7e3.jpg)
Fig. 4. Structural Frameworks for VAE Anomaly Detection.

The internal structure of a VAE is similar to that of a traditional AE but with some key differences. First, the encoder in a VAE not only compresses the input data into a lower-dimensional latent space but also learns a probabilistic distribution, typically parameterized by a mean $\mu$ and a variance $\sigma^2$, as shown in Fig. 4. This enables the VAE to generate more meaningful latent variables $z$, enhancing the diversity and robustness of the generated data. A critical component introduced in VAEs is the Kullback-Leibler (KL) divergence, which measures the difference between the latent distribution generated by the encoder and a predefined prior distribution (usually a standard normal distribution). Unlike traditional AEs, which focus solely on minimizing the reconstruction error, VAEs are trained by minimizing a combination of the reconstruction error and the KL divergence:

$$
\mathcal{L}_{\mathrm{VAE}} = -\mathbb{E}_{q(z|x)} [\log p(x|z)] + D_{\mathrm{KL}}\big(q(z|x) \,\|\, p(z)\big). \tag{3}
$$

This difference makes VAEs more powerful in AD because they not only consider the quality of the data reconstruction but also enforce a structured latent space through the KL divergence. By doing so, the KL divergence helps to regularize the latent space, ensuring that the encoded representations are smoothly distributed and centered around the prior distribution. This regularization reduces overfitting, promotes better generalization, and makes it easier to distinguish between normal and anomalous data, especially in complex and high-dimensional datasets.
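The objective in Eq. (3) can be sketched in a few lines of PyTorch. The encoder/decoder layers, dimensions, and the use of MSE as the reconstruction term are illustrative assumptions for a Gaussian decoder, not the specifics of any cited model:

```python
# Minimal sketch of the VAE training objective in Eq. (3) and of
# reconstruction-error scoring; all dimensions are illustrative.
import torch
import torch.nn as nn
import torch.nn.functional as F

class VAE(nn.Module):
    def __init__(self, dim=32, latent=8):
        super().__init__()
        self.enc = nn.Linear(dim, 2 * latent)   # outputs [mu, log sigma^2]
        self.dec = nn.Linear(latent, dim)

    def forward(self, x):
        mu, logvar = self.enc(x).chunk(2, dim=1)
        z = mu + torch.randn_like(mu) * (0.5 * logvar).exp()  # reparameterization
        return self.dec(z), mu, logvar

def vae_loss(x, x_hat, mu, logvar):
    recon = F.mse_loss(x_hat, x, reduction="sum")  # -E[log p(x|z)] up to constants
    # Closed-form KL( q(z|x) || N(0, I) )
    kl = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
    return recon + kl

model = VAE()
x = torch.randn(16, 32)
x_hat, mu, logvar = model(x)
loss = vae_loss(x, x_hat, mu, logvar)        # minimized during training
# At test time, per-sample reconstruction error serves as the anomaly score:
score = ((x - x_hat) ** 2).sum(dim=1)
```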
Table III provides a comprehensive summary of the latest advancements in VAE-based AD models, showcasing innovative enhancements that address various challenges such as noise robustness, semantic feature learning, and anomaly reconstruction. Huang et al. [101] enhance VAE-based AD by incorporating an Autoencoding Transformation into the model, which ensures that the training phase effectively captures high-level visual semantic features of normal images, thereby increasing the anomaly score gap between normal and anomalous samples. Similarly, Yin et al. [102] utilize a Convolutional Neural Network (CNN) and a VAE with a two-stage sliding window approach in data preprocessing to learn better representations for AD tasks. Zhang et al. [103] propose the Graph Relational Learning Network (GReLeN), which integrates a VAE structure with graph dependency learning for AD in multivariate time series through reconstruction. Zhou et al. [104] propose a variational long short-term memory (VLSTM) model for high-dimensional AD in imbalanced datasets, combining a compression network for efficient data representation with an estimation network for accurate classification of network traffic data. The VLSTM model balances data compression and feature retention using core LSTM and variational modules.

In recent years, many advancements in AD models inspired by VAEs have focused on Adversarial Autoencoders (AAEs) [105].

TABLE II GAN-BASED MODELS IN ANOMALY DETECTION
| Paper | Technique | Approach Type | Strength | Weakness | Data Type | Year |
| --- | --- | --- | --- | --- | --- | --- |
| [90] | GAN | Reconstruction | Does not depend on assumptions about the normal data and requires less computing resources. | The method involves the selection of multiple hyperparameters, making the tuning process challenging and potentially time-consuming. | Structured data | 2020 |
| [48] | GAN+CNN | Prediction | The NM-GAN model enhances both the generalization and discrimination abilities through noise-modulated adversarial learning, resulting in improved accuracy and stability for video AD. | The model struggles to fully capture complex temporal patterns like staying, wandering, and running, and lacks adaptive modulation of generalization and discrimination abilities, leaving room for improvement in spatiotemporal feature learning. | Video data | 2021 |
| [94] | GAN | Reconstruction | Is capable of generating more effective artificial samples for training supervised learning models, thereby addressing the issue of data imbalance. | Its performance is inferior to the baseline algorithms when the balanced ratio is 0.125. | Image data | 2021 |
| [95] | GAN+LSTM | Prediction | The TMANomaly framework excels in capturing complex multivariate correlations in industrial time series data, enhancing AD accuracy through mutual adversarial training. | The paper lacks discussion on TMANomaly's generalization to other datasets, the potential limitations of using GRA for feature selection, and the computational efficiency or scalability, which are critical for real-time industrial systems. | Multivariate time series data | 2022 |
| [96] | GAN+LSTM | Prediction | The FGANomaly method effectively filters anomalous samples before training, improving AD accuracy and robustness by precisely capturing the normal data distribution and dynamically adjusting the generator's focus. | The method lacks effective fusion of information across different dimensions in multivariate time series, which limits its ability to fully capture complex correlations. | Multivariate time series data | 2022 |
| [93] | GAN | Reconstruction | Improves the quality of the generated anomaly images and generates anomalies with different shapes, rotation angles, spatial locations, and numbers in a controllable manner. | The generated images are not very sensitive to changes in lighting. | Image data | 2023 |
| [89] | GAN | Reconstruction | Improves training efficiency and stability in dynamic graph AD while avoiding the expensive optimization process typical of traditional graph generative adversarial networks. | The detection accuracy on the UCI Message dataset is lower than that of TADDY. | Dynamic graph data | 2023 |
| [92] | GAN+Transformer | Reconstruction | Effectively detects anomalies in long sequences, mitigates overfitting, and incorporates a contrastive loss into the discriminator to fine-tune the GAN, ensuring strong generalization ability. | May struggle with irregularly sampled data or datasets with many missing values, requires careful tuning of several hyperparameters, and demands significant computational resources, posing challenges for real-time processing on limited-capacity devices. | Multivariate time series data | 2024 |
Unlike traditional VAEs, which use KL divergence to match the latent space distribution to a prior, AAEs achieve this through the use of GANs. Specifically, AAEs employ a GAN's discriminator to evaluate the latent variable distribution produced by the encoder and use adversarial training to align it with the desired prior distribution, providing more flexible control over the quality of the generated data. Wu et al. [87] propose the Fault-Attention Generative Probabilistic Adversarial Autoencoder (FGPAA) for machine AD, utilizing an end-to-end AAE with double discriminators to extract relevant features and ensure accurate equipment health monitoring through a fault-attention probability distribution. Idrissi et al. [51] apply AAEs and federated learning (FL) to network intrusion detection, effectively ensuring AD performance while safeguarding client privacy. Experimental results demonstrate that the proposed model outperforms AE, VAE, and AAE baselines on various network traffic datasets, achieving high performance across different metrics. Su et al. [106] propose two contamination-immune BiGAN models, integrating elements of VAE and BiGAN to create a new AAE-based framework that effectively detects anomalies by learning the probability distribution of normal samples from contaminated datasets, significantly outperforming state-of-the-art methods in scenarios where training data is impure. Similar to the aforementioned AAE models, Du et al. [86] use GANs to purify the original dataset, generating synthetic "normal" data to improve outlier detection accuracy. Continuing the advancements in AAE-based models, Yu et al. [107] introduce an Adversarial Contrastive Autoencoder (ACAE) for Multivariate Time Series (MTS) AD, which enhances feature representation through adversarial training and contrastive learning, demonstrating superior performance across multiple real-world datasets and further extending the application of AAE-based methods in robust AD.

3) Diffusion Model-Based Anomaly Detection: Diffusion models are a type of generative model that operate through two key phases: a fixed forward diffusion process and a learnable reverse diffusion process [108]. Mathematically, the forward process involves progressively adding Gaussian noise to the data $x_0$, transforming it into pure noise $x_T$ over $T$ steps. This process can be described as:

$$
q(x_{t} \mid x_{t-1}) = \mathcal{N}\left(x_{t}; \sqrt{1 - \beta_{t}}\, x_{t-1}, \beta_{t} I\right), \tag{4}
$$

where $q(x_{t} \mid x_{t-1})$ is the conditional probability distribution of $x_{t}$ given $x_{t-1}$, $\beta_{t}$ is the noise variance at step $t$, and $x_{t}$

TABLE III AUTOENCODER-BASED MODELS IN ANOMALY DETECTION
| Paper | Technique | Approach Type | Strength | Weakness | Data Type | Year |
| --- | --- | --- | --- | --- | --- | --- |
| [104] | VAE-based (VAE+LSTM) | Reconstruction | Effectively addresses imbalanced and high-dimensional challenges in industrial big data. | Falls short of achieving the highest AUC and F1 scores compared to other methods. | Industrial big data | 2020 |
| [87] | AAE-based | Reconstruction | FGPAA reduces information loss during feature extraction and constructs fault-attention anomaly indicators using low-dimensional feature probability and reconstruction error. | Runtime is approximately five times longer than SOM. | Rotating machine fault simulator data | 2020 |
| [98] | AE-based (AE+CNN) | Reconstruction | The Auto-AD method enables fully autonomous hyperspectral AD, automatically separating anomalies based on reconstruction errors without the need for manual tuning or additional processing. | Lower AUC score compared to the GRX method on the Honghu dataset. | Hyperspectral data | 2021 |
| [99] | AE-based (AE+CNN) | Reconstruction | MSNet offers an effective solution for handling multiscale anomaly shapes, providing greater flexibility without the need for threshold fine-tuning. | Multiple convolutional encoder-decoder layers and enhanced training increase computational cost and training time. | Hyperspectral data | 2021 |
| [101] | VAE-based (VAE+Transformer) | Reconstruction | SSR-AE leverages self-supervised learning to enhance normal data reconstruction and hinder abnormal data, optimizing mutual information for effective transformation and image reconstruction. | Struggles with transformations, relying heavily on their effectiveness for AD. | Image data | 2021 |
| [97] | AE-based | Reconstruction | Maintains the geometric structure and local spatial coherence of hyperspectral images (HSI), reducing search space and execution time per pixel. | High execution time for constructing the SuperGraph matrix with large datasets. | Hyperspectral data | 2021 |
| [51] | AAE-based (AAE+Federated learning) | Reconstruction | Fed-ANIDS demonstrates strong generalization, outperforms GAN-based models, and ensures privacy protection through federated learning. | Computational overhead due to the federated learning framework, increasing training complexity and latency. | Cybersecurity data | 2023 |
| [100] | AE-based | Reconstruction | Applicable to time series AD under data contamination. | Assumes normal samples follow a Gaussian distribution, limiting applicability, and has higher computational complexity. | Time series data | 2024 |
| [106] | AAE-based | Reconstruction | Learns the probability distribution of normal samples from contaminated datasets, achieving convergence and outperforming baseline models. | Relies on the assumption that the contamination ratio is known, which may not always hold in practice. | Medical image data | 2024 |
| [86] | AAE-based | Reconstruction | Generates a clean dataset from contaminated data for AD, with linear scalability to larger datasets. | Struggles with detection accuracy on datasets with multiple distribution patterns. | Tabular data | 2024 |
| [107] | AAE-based | Reconstruction | Excels at learning high-level semantic features and capturing normal patterns of MTS with contrastive learning constraints, ensuring stability across parameter settings. | Performance on all metrics for the SMAP and PSM datasets is lower than baseline methods. | Multivariate time series data | 2024 |
represents the noisy data at step $t$. As $t$ increases, the data becomes more corrupted by noise until it reaches a state of pure Gaussian noise at step $T$.

The reverse process learns to gradually denoise the data, removing the added noise step by step. The model learns a parameterized distribution $p_{\theta}(x_{t-1} \mid x_t)$ to reverse the noise addition process, reconstructing the original data from the noisy data. This reverse process is trained to minimize the variational bound on the data likelihood, expressed as:

$$
L = \mathbb{E}_{q} \left[ D_{\mathrm{KL}}\left(q(x_{t-1} \mid x_{t}, x_{0}) \,\|\, p_{\theta}(x_{t-1} \mid x_{t})\right) \right]. \tag{5}
$$

By progressively removing noise, diffusion models generate high-fidelity samples, first capturing coarse structures and then refining details in each step. In the context of AD, diffusion models are trained on normal data to learn the underlying data distribution through an iterative noise-removal process. Similar to other reconstruction-based methods, anomalies can be identified by evaluating the reconstruction error, where a higher error indicates that the data deviates from the learned normal patterns.

Diffusion models stand out from GANs and VAEs in several key ways. They avoid common issues such as mode collapse in GANs, where only a subset of the data distribution is captured, leading to reduced diversity. Diffusion models also overcome the blurriness associated with VAEs, which often results from pixel-based loss and a smaller latent space. By iteratively denoising data, diffusion models maintain both high fidelity and diversity in their outputs.

While diffusion models are slower in generating samples due to their iterative nature, their ability to accurately reconstruct data and cover the full range of the training dataset makes them particularly well-suited for AD [109]. In AD, where precision is critical, diffusion models excel by generating detailed and high-quality samples, enabling them to identify subtle deviations from normal patterns with greater accuracy than other generative models. Several works have leveraged the advantages of diffusion models in AD. For example, Zhang et al. [110] utilize the high-quality and diverse image generation capabilities of diffusion models to enhance reconstruction quality in DiffAD, addressing the limitations of traditional methods by introducing noisy condition embedding and interpolated channels. Similarly, Li et al. [111] apply a diffusion model to reconstruct normal data distributions and integrate an auxiliary learning module with pretext tasks to better distinguish between normal and abnormal data. Expanding on these ideas, Zeng et al. [112] improve denoising diffusion probabilistic models (DDPMs) for radio AD by incorporating an AE to learn the distribution of normal signals and their power spectral density (PSD), using reconstruction error to identify anomalies. Li et al. [113] present a Controlled Graph Neural Network (ConGNN) approach based on DDPMs to address the challenge of limited labeled data. Li et al. [114] further explore diffusion models in vehicle trajectory AD, employing decoupled Transformer-based encoders to capture temporal dependencies and spatial interactions among vehicles, significantly improving AUC and F1 scores on real-world and synthetic datasets. Similarly, Pei et al. [115] establish the two-stage diffusion model (TSDM) to mitigate the influences of anomalies in smart grids, where the first stage is a diffusion-based AD component. In multi-class AD, He et al. [116] propose DiAD, a framework that enhances reconstruction accuracy through a combination of a semantic-guided network, spatial-aware feature fusion, and a pre-trained feature extractor to generate anomaly maps.
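As a concrete illustration of the forward process in Eq. (4), the following sketch corrupts a batch of samples over $T$ steps by iteratively sampling $x_t \sim \mathcal{N}(\sqrt{1-\beta_t}\,x_{t-1}, \beta_t I)$. The linear noise schedule, step count, and tensor shapes are illustrative assumptions:

```python
# Minimal sketch of the forward diffusion process of Eq. (4);
# the schedule and shapes are illustrative, not from a specific paper.
import torch

T = 100
betas = torch.linspace(1e-4, 0.02, T)      # linear noise-variance schedule

def forward_diffusion(x0, betas):
    """Iteratively sample x_t ~ N(sqrt(1 - beta_t) * x_{t-1}, beta_t * I)."""
    x = x0
    trajectory = [x0]
    for beta in betas:
        noise = torch.randn_like(x)
        x = torch.sqrt(1.0 - beta) * x + torch.sqrt(beta) * noise
        trajectory.append(x)
    return trajectory                       # x_0, x_1, ..., x_T

x0 = torch.randn(16, 32)                    # batch of "normal" samples
xs = forward_diffusion(x0, betas)           # xs[-1] is close to pure noise
```

A trained reverse model $p_\theta$ would then denoise `xs[-1]` step by step; at test time, the reconstruction error between an input and its denoised version serves as the anomaly score, in line with the reconstruction-based criterion described above.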
# B. Deep learning methods for Anomaly Detection based on Prediction

Prediction-based AD methods operate by forecasting future values or estimating missing attributes and comparing these predictions to the actual observed values. When significant deviations occur, this indicates potential anomalies, as the data departs from the learned normal patterns. These methods are versatile and can be applied across various data types, leveraging relationships between variables or temporal correlations to detect anomalies. Prediction-based methods excel in scenarios where capturing patterns and trends is essential. By learning underlying structures in the data, whether based on time dependencies or more general interactions between variables, these methods can effectively predict expected outcomes, and deviations from these expectations are flagged as anomalies. This makes prediction-based approaches highly adaptable across different contexts and data types. In this section, we explore three main approaches for prediction-based AD: Recurrent Neural Networks (RNNs), attention mechanisms, and Graph Neural Networks (GNNs), all of which have demonstrated efficacy in capturing intricate patterns and relationships within data to identify anomalies. By learning underlying spatial, temporal, or graph-based relationships, these approaches can effectively model complex interactions, providing reliable detection of unexpected behaviors or deviations from learned patterns.

1) RNN-based Anomaly Detection: Recurrent Neural Networks (RNNs) [117] are a special type of neural network designed to process sequential data by capturing dependencies between elements in a sequence. Unlike standard neural networks, RNNs incorporate a state vector $s_t$ in the hidden layer, allowing them to retain information from previous steps and model sequential patterns. This capability makes them effective in various applications where data has an inherent order, such as event logs, system monitoring, and structured sequences in cybersecurity or industrial processes. For an input $x_t$ at time $t$, the updates of the state value $s_t$ and hidden layer output $h_t$ in RNNs can be represented as

$$
\boldsymbol{s}_{t} = \sigma\left(\boldsymbol{W}^{x} \boldsymbol{x}_{t} + \boldsymbol{W}^{s} \boldsymbol{s}_{t-1} + \boldsymbol{b}^{s}\right) \tag{6}
$$

$$
\boldsymbol{h}_{t} = \operatorname{softmax}\left(\boldsymbol{W}^{h} \boldsymbol{s}_{t} + \boldsymbol{b}^{h}\right),
$$

where $\sigma(\cdot)$ is the sigmoid activation function, $W^x$, $W^s$, and $W^h$ represent the network weights, and $b^s$ and $b^h$ are the network biases. By maintaining a recurrent state, RNNs can effectively capture dependencies across different steps within a sequence, making them well-suited for tasks involving ordered data.

However, RNNs face the problem of exploding or vanishing gradients when dealing with long sequences.
Long Short-Term Memory networks (LSTMs) [118], a specialized type of RNN, were introduced to address these issues. Specifically, LSTMs replace the hidden layer of RNNs with an LSTM block consisting of input, output, and forget gates. The inference process of an LSTM at time $t$ is given by

$$
\boldsymbol{f}_{t} = \sigma\left(\boldsymbol{W}^{xf} \boldsymbol{x}_{t} + \boldsymbol{W}^{hf} \boldsymbol{h}_{t-1} + \boldsymbol{b}^{f}\right)
$$

$$
\boldsymbol{i}_{t} = \sigma\left(\boldsymbol{W}^{xi} \boldsymbol{x}_{t} + \boldsymbol{W}^{hi} \boldsymbol{h}_{t-1} + \boldsymbol{b}^{i}\right)
$$

$$
\tilde{\boldsymbol{c}}_{t} = \tanh\left(\boldsymbol{W}^{x\tilde{c}} \boldsymbol{x}_{t} + \boldsymbol{W}^{h\tilde{c}} \boldsymbol{h}_{t-1} + \boldsymbol{b}^{\tilde{c}}\right) \tag{7}
$$

$$
\boldsymbol{c}_{t} = \boldsymbol{f}_{t} \odot \boldsymbol{c}_{t-1} + \boldsymbol{i}_{t} \odot \tilde{\boldsymbol{c}}_{t}
$$

$$
\boldsymbol{o}_{t} = \sigma\left(\boldsymbol{W}^{xo} \boldsymbol{x}_{t} + \boldsymbol{W}^{ho} \boldsymbol{h}_{t-1} + \boldsymbol{b}^{o}\right)
$$

$$
\boldsymbol{h}_{t} = \boldsymbol{o}_{t} \odot \tanh\left(\boldsymbol{c}_{t}\right),
$$

where $f_{t}$, $i_{t}$, and $o_{t}$ are the forget, input, and output gate activations, respectively, $c_{t}$ represents the cell state of the LSTM, $\odot$ denotes element-wise multiplication, and $\tanh(\cdot)$ is the hyperbolic tangent activation function. By controlling the forget, input, and output gates, the LSTM determines the influence of historical time series information and the current input on the current output, thus effectively mitigating gradient vanishing and allowing robust modeling of complex sequences. Reference [119] provides comprehensive evidence of LSTM's effectiveness in AD across various technical systems, demonstrating its superiority in learning complex temporal behaviors and accurately identifying anomalies.

The Gated Recurrent Unit (GRU) [120] is a simplified version of the LSTM that includes only an update gate and a reset gate and uses the hidden state alone to represent both short-term and long-term information. These different types of RNNs can be used in prediction-based AD tasks, with the specific detection and inference method illustrated in Fig. 5. RNNs, LSTMs, and GRUs take time series data from $t - w$ to $t - 1$ as input, and their pre-trained neural networks use these temporally ordered data to predict the single-step or multi-step future values of the univariate or multivariate time series. If the difference between the actual and predicted values is below a threshold, no anomaly is detected; if the difference exceeds the threshold, an anomaly is detected and the spatiotemporal location of the anomaly is identified.
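The detection loop just described reduces to a few lines of code. The following PyTorch sketch forecasts the next step of a multivariate series from a sliding window and thresholds the prediction error; the window size, model dimensions, and threshold are illustrative assumptions:

```python
# Minimal sketch of prediction-based AD with an LSTM, as in Fig. 5;
# the window size, dimensions, and threshold are illustrative.
import torch
import torch.nn as nn

class LSTMForecaster(nn.Module):
    """Predicts the next value of a multivariate series from a window."""
    def __init__(self, n_vars=8, hidden=64):
        super().__init__()
        self.lstm = nn.LSTM(n_vars, hidden, batch_first=True)
        self.head = nn.Linear(hidden, n_vars)

    def forward(self, window):            # window: (batch, w, n_vars)
        out, _ = self.lstm(window)
        return self.head(out[:, -1])      # one-step-ahead prediction

model = LSTMForecaster()                  # in practice: train on normal data
window = torch.randn(4, 20, 8)            # observations from t-w to t-1
actual = torch.randn(4, 8)                # observations at time t
with torch.no_grad():
    pred = model(window)
error = (actual - pred).abs().max(dim=1).values  # worst per-variable deviation
is_anomaly = error > 3.0                  # threshold tuned on validation data
```

Taking the maximum deviation across variables also localizes which dimension triggered the alarm, matching the spatiotemporal localization step mentioned above.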
TABLE IV DIFFUSION-BASED MODELS IN ANOMALY DETECTION

| Paper | Technique | Approach Type | Strength | Weakness | Data Type | Year |
| --- | --- | --- | --- | --- | --- | --- |
| [110] | Diffusion | Reconstruction | The latent diffusion model (LDM) used in this method achieves state-of-the-art performance in surface AD by generating high-quality, semantically correct reconstructions, effectively avoiding overfitting to anomalies. | It is less suitable for real-time applications or environments with limited computational resources. | Image data | 2023 |
| [112] | Diffusion+VAE | Reconstruction | The AE-DDPMs algorithm effectively improves stability and reduces computational costs in radio AD, outperforming GAN-based methods in complex electromagnetic environments. | The anomalies in the experimental data are artificially generated rather than originating from real-world conditions, which may limit the model's applicability to genuine, real-world scenarios. | Radio signal data | 2023 |
| [113] | Diffusion+GNN | Prediction | ConGNN effectively addresses the issue of limited labeled data by generating augmented graph data using a graph-specific diffusion model. | The reliance on graph-specific augmentation might not generalize well to other types of data, potentially limiting its applicability beyond graph-based AD. | Image data | 2023 |
| [111] | Diffusion+VAE | Hybrid | SDAD effectively enhances AD by combining self-supervised learning for discriminative data representation with denoising diffusion. | The generation of pseudo anomalies relies solely on standard Gaussian sampling, which may not fully capture the complexity of real anomalies, limiting the model's ability to accurately simulate genuine abnormal data. | Structured data | 2024 |
| [114] | Diffusion+Transformer | Hybrid | DiffTAD effectively models temporal dependencies and spatial interactions in vehicle trajectories through diffusion models, significantly improving AD accuracy and robustness to noise. | The anomalies are primarily evaluated on synthetic datasets, which may not fully reflect the complexity and diversity of real-world trajectory data. | Vehicle trajectory data | 2024 |
![](images/776e31311c1c4c6521d678b5659cbccf97515cff7d507f59b7a11d83bd281e30.jpg)
Fig. 5. RNN-based application example for time series data anomaly detection: (a) RNN-based, (b) LSTM-based, (c) GRU-based.

Current RNN-based AD primarily focuses on improving RNN algorithms tailored to AD tasks and integrating RNNs with other methods. The method in [121] employs a pruning algorithm to reduce the number of false data points, enabling the LSTM-based AD approach to better address the challenges posed by the extremely uneven distribution of railway traffic data. LSTM combined with AE [122], VAE [123], and Singular Value Decomposition (SVD) [124] has also been used to identify anomalies in Controller Area Networks (CANs) [125], electrocardiograms, and Internet monitoring data. GANs based on adversarial learning have also been integrated into the time series learning of LSTMs, achieving very high performance in scenarios with few features [95], extremely imbalanced training sets, and noise interference [96]. CNNs have also been integrated with LSTMs serially [126], in parallel [127], or as a foundational layer [128] to better extract the spatiotemporal correlations of multidimensional time series, thereby enhancing the performance of AD. GRUs, compared to LSTMs, have a more streamlined architecture, resulting in lower computational complexity during training and execution of AD tasks, and they tend to perform better on certain less complex sequential data. For instance, GRUs enhance interpretability by uncovering latent correlations in multivariate time series data from industrial control system sensors [129]. Similar to LSTMs, GRUs can also be combined with AEs [130] or VAEs [25] in an encoder-decoder architecture to mitigate the effects of noise and anomalies, thereby improving the accuracy of AD.

2) Attention-based Anomaly Detection: The attention mechanism was initially applied in machine translation [131], with its core idea being to enable the neural network to focus on the relevant parts of the input values. While attention-based methods have shown great promise in time series AD, their applications are not limited to temporal data. These methods can effectively capture dependencies in various types of data, including spatial, spatiotemporal, and multimodal datasets. This flexibility broadens their use cases across different AD tasks. Compared to RNN-based approaches, they are better suited for long or complex sequences because attention can compute dependencies between all positions in the sequence simultaneously, while RNNs process sequences sequentially, step by step.

Figure 6 illustrates a typical attention-based model for AD. Among attention-based methods, the self-attention mechanism is particularly effective in capturing global dependencies across various types of sequential data, including temporal, spatial, and spatiotemporal inputs.

![](images/019b206e929dfe093eeec77a1d42acc96182676e7e7c79b8a51a4f5bc4ca29c2.jpg)
Fig. 6. Attention-based model for anomaly detection. The model first embeds sequential data using input embedding and positional encoding to preserve temporal dependencies. The multi-head attention mechanism captures long-range dependencies by processing interactions between all time steps. The feedforward layer then refines feature representations, and a dense interpolation layer enhances anomaly-related features before passing them to a fully connected network (FNN) for final AD.
For an input dataset $\mathbf{X} = [x_{1}, x_{2}, \dots, x_{t}]$, the queries, keys, and values are defined as $Q = X W_{Q}$, $K = X W_{K}$, and $V = X W_{V}$, where $W_{Q}$, $W_{K}$, and $W_{V}$ are trainable weight matrices. The attention weights are then computed from $Q$, $K$, and $V$ as

$$
\alpha_{ij} = \frac{\exp\left(\boldsymbol{Q}_{i} \boldsymbol{K}_{j}^{\top} / \sqrt{d_{k}}\right)}{\sum_{j'=1}^{T} \exp\left(\boldsymbol{Q}_{i} \boldsymbol{K}_{j'}^{\top} / \sqrt{d_{k}}\right)}, \tag{8}
$$

where $d_k$ is the dimension of the keys. Finally, the output of the self-attention-based neural network, which takes into account the importance of each input value, is given by $\operatorname{Attention}(Q, K, V) = \alpha V$.

To enable the model to capture features of various patterns, multi-head attention is also well-suited for AD. The calculation of multiple heads is expressed as

$$
\operatorname{Multihead}(\boldsymbol{Q}, \boldsymbol{K}, \boldsymbol{V}) = \operatorname{Concat}\left(\operatorname{head}_{1}, \dots, \operatorname{head}_{h}\right) \boldsymbol{W}_{O}, \tag{9}
$$

where each head is computed as $\operatorname{head}_i = \operatorname{Attention}(\boldsymbol{Q}\boldsymbol{W}_{Q_i}, \boldsymbol{K}\boldsymbol{W}_{K_i}, \boldsymbol{V}\boldsymbol{W}_{V_i})$. Here, $W_{Q_i}$, $W_{K_i}$, and $W_{V_i}$ are trainable parameters for different heads, and $W_{O}$ is the linear transformation matrix for the output. $\operatorname{Concat}(\operatorname{head}_1, \dots, \operatorname{head}_h)$ concatenates the outputs of all attention heads along the feature dimension. Attention-based methods can effectively capture long-term dependencies, improve computational efficiency, and enhance the interpretability of AD through visualized attention weights. When applied to AD, differences in the distribution of attention weights between normal and anomalous time series can serve as the basis for detection.

In the field of AD, particularly for time series data, a growing number of studies have proposed deep learning methods based on attention mechanisms. Autoencoders that combine convolution, LSTM, and self-attention mechanisms can better extract complex features from multivariate time series data and robustly detect anomalies under high-noise conditions [132]. The Transformer, as a well-known attention-based model, has demonstrated superior performance in unsupervised prediction-based time series AD compared to LSTM, as it can learn the dynamic patterns of sequential data through self-attention mechanisms [133]. Transformer-based AD utilizes attention-based sequence encoders for rapid inference, achieving an F1 score improvement of up to $17\%$ on public datasets and reducing training time by as much as $99\%$ compared to the baseline [134]. Despite its outstanding capabilities, the Transformer still faces certain bottlenecks in AD. Attention-based methods are prone to overfitting when data is insufficient. The method in [92] seamlessly integrates contrastive learning and GANs into the Transformer, utilizing data augmentation techniques and geometric distribution masking to expand the training data, thereby enhancing data diversity and improving accuracy by $9.28\%$.
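Eqs. (8) and (9) can be condensed into a short single-head sketch; the dimensions and random initialization below are illustrative assumptions:

```python
# Minimal sketch of single-head self-attention (Eqs. (8)-(9));
# dimensions and initialization are illustrative.
import torch
import torch.nn.functional as F

def self_attention(X, W_Q, W_K, W_V):
    """Returns the attention output and the weight matrix alpha."""
    Q, K, V = X @ W_Q, X @ W_K, X @ W_V
    d_k = K.shape[-1]
    scores = Q @ K.transpose(-2, -1) / d_k ** 0.5   # (T, T) similarities
    alpha = F.softmax(scores, dim=-1)               # row-normalized weights
    return alpha @ V, alpha

T, d, d_k = 50, 16, 8                               # sequence length, dims
X = torch.randn(T, d)                               # embedded time series
W_Q, W_K, W_V = (torch.randn(d, d_k) for _ in range(3))
out, alpha = self_attention(X, W_Q, W_K, W_V)
# Rows of alpha for anomalous time steps tend to deviate from the weight
# distributions seen on normal data, which can itself serve as a signal.
```

Multi-head attention simply runs this routine $h$ times with separate projection matrices and concatenates the outputs before applying $W_O$, as in Eq. (9).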
Attention mechanisms are also frequently applied in graph neural networks to jointly detect anomalies in time series data. Reference [135] proposes a novel efficient Transformer model based on graph learning methods, employing two-stage adversarial training to train the AD model and utilizing prototypical networks to apply the model to anomaly classification. A contrastive time-frequency reconstruction network is used for unsupervised AD and localization [136], where attention mechanisms and graph convolutional networks update the feature information of each time point, combining points with similar feature relationships to dilute the influence of anomalous points on normal points. Reference [137] models the correlations between temporal variables using graph convolutional networks, while also using an attention-based reconstruction model to output the importance of time series data within each time window, achieving an average AD F1 score exceeding 0.96. For multimodal data, a multimodal graph attention network (M-GAT) and temporal convolutional networks are used to capture spatial-temporal correlations in multimodal time series and correlations between modalities [138], ultimately outputting anomaly scores through reconstruction or prediction. More details on the application of GNNs in AD are elaborated in the next subsection.

In addition to GNNs, CNNs can also incorporate attention mechanisms to enhance various metrics of AD. Reference [139] effectively captures the local features of subsequences by leveraging the locality of CNNs and combining it with positional embeddings. At the same time, Zhu et al. [139] employ attention mechanisms to extract global features from the entire time series, thereby enhancing the effectiveness and potential of detection. Many works have also introduced LSTMs to extract temporal correlations in time series data based on CNN models with attention mechanisms. For example, Sun et al. [140] employ a sequential approach where 1D convolution is first used to extract abstract features of the signal values at each time step, which are then input into a bidirectional long short-term memory network (Bi-LSTM), ultimately combining with attention mechanisms to make the model focus on locally important time steps. Meanwhile, Le et al. [141] integrate convolutional layers, LSTM layers, and self-attention layers into an autoencoder architecture to better extract complex features from multivariate time series. Similarly, Pei et al. [126] employ an additional SVM to classify the attention weights of a CNN-LSTM model with attention mechanisms to determine whether cyber-attacks have occurred in energy systems; the input data are multimodal measurements from the deployed sensors.

3) GNN-based Anomaly Detection: Graph Neural Networks (GNNs) have gained increasing attention in AD tasks, as many types of data can be naturally represented as graph structures [142]. Wu et al. [143] have demonstrated the effectiveness of GNNs in identifying anomalies within complex graph-structured data environments. As neural network models specifically designed to handle graph-structured data, GNNs define nodes, edges, and graphs: nodes represent individual elements in the dataset, such as data points in a sequence, sensor readings in multivariate data, or entities in relational datasets, denoted as the set $V$; edges capture the relationships or dependencies between these elements, denoted as the set $E$, and can represent temporal correlations, spatial dependencies, or more abstract relational connections depending on the context.
The graph, represented as $G = (V, E)$, captures the overall structure formed by nodes and edges. The primary operations in GNN training are message passing and aggregation, which are used to update and learn node features. Specifically, during message passing, each node receives information from its neighboring nodes and updates its own state. For a node $v$, the message passing formula is given as

$$
\boldsymbol{m}_{v}^{(k)} = \sum_{u \in \mathcal{N}(v)} \operatorname{MSG}\left(\boldsymbol{h}_{u}^{(k-1)}, \boldsymbol{h}_{v}^{(k-1)}, \boldsymbol{e}_{uv}\right), \tag{10}
$$

where $\mathcal{N}(v)$ denotes the set of neighboring nodes of $v$, $h_u$ and $h_v$ are the features of nodes $u$ and $v$ at layer $k-1$, and $e_{uv}$ represents the edge features. Subsequently, the received messages are aggregated with the current node state, and the node features are updated as

$$
\boldsymbol{h}_{v}^{(k)} = \operatorname{UPDATE}\left(\boldsymbol{h}_{v}^{(k-1)}, \boldsymbol{m}_{v}^{(k)}\right), \tag{11}
$$

where $\operatorname{UPDATE}(\cdot, \cdot)$ is the update function.

As illustrated in Fig. 7, which uses time series data as an example, GNNs treat each variable in the multivariate time series as a node to capture complex relationships between different dimensions. While the primary focus here is on the predictive capabilities of GNNs, it is worth noting that they are also effective in reconstruction-based AD. The final decision on whether the input sequence is anomalous is primarily based on prediction errors or graph structure differences, with reconstruction errors serving as a supplementary indicator. GNN-based AD methods excel at modeling complex dependencies between time steps or sensors, offering the flexibility to handle both static and dynamic relationships across diverse time series structures. However, they still face challenges such as high computational complexity on large-scale graphs and difficulties in constructing optimal edge and graph configurations [144].

![](images/3330583da44f04aadda892ec09bf36e2ea653e3123da2117b4ea223ff767ce02.jpg)
Fig. 7. GNN-based method for anomaly detection with time series data. Time series data is embedded into a graph structure, where a spatial-temporal GNN extracts dependencies. The reconstruction module then estimates the original data. Anomalies are detected based on graph relational discrepancies (differences in predicted graph structure) and prediction discrepancies (differences between reconstructed and actual time series).

In prediction-based GNNs for AD, GDN [145] is a representative work that combines a structure learning approach with a GNN, additionally using attention weights to predict time series values and detect anomalies based on the predictions. Similar methods include GTA [146] and CST-GL [147]. Furthermore, Liu et al. [148] propose a GNN-based contrastive learning model that generates prediction scores from high-dimensional attributes and local structures to detect anomalies, outperforming state-of-the-art methods on seven benchmark datasets. Beyond prediction-based methods, there are also reconstruction-based GNN approaches. For example, MTAD-GAT [149] employs a graph attention network as a spatiotemporal encoder to learn dependencies across variables and time, reconstructing the time series with a backbone reconstructor and identifying anomalies based on reconstruction errors. Similar techniques include VGCRN [150] and FuSAGNet [151].
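The message-passing and update steps of Eqs. (10) and (11) can be sketched directly on a small graph. The particular choices below (a linear `MSG` over node-feature pairs, edge features omitted, a linear-plus-ReLU `UPDATE`) are illustrative assumptions, not the design of any cited model:

```python
# Minimal sketch of one message-passing layer (Eqs. (10)-(11));
# MSG/UPDATE choices and graph structure are illustrative.
import torch
import torch.nn as nn

class MessagePassingLayer(nn.Module):
    def __init__(self, dim):
        super().__init__()
        self.msg = nn.Linear(2 * dim, dim)   # MSG over (h_u, h_v) pairs
        self.upd = nn.Linear(2 * dim, dim)   # UPDATE over (h_v, m_v)

    def forward(self, h, edges):
        """h: (num_nodes, dim); edges: list of directed (u, v) pairs."""
        m = torch.zeros_like(h)
        for u, v in edges:                   # sum messages per receiving node
            m[v] += self.msg(torch.cat([h[u], h[v]]))
        return torch.relu(self.upd(torch.cat([h, m], dim=1)))

h = torch.randn(4, 16)                       # one node per sensor/variable
edges = [(0, 1), (1, 2), (2, 3), (3, 0)]     # learned or prior dependencies
layer = MessagePassingLayer(16)
h_new = layer(h, edges)                      # updated node embeddings
```

In a prediction-based AD model, the updated embeddings `h_new` would feed a forecasting head per node, and large per-node prediction errors would flag the corresponding sensors as anomalous.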
# C. Deep learning methods for Anomaly Detection based on Hybrid Methods

In AD, reconstruction-based and prediction-based methods offer distinct but complementary approaches to identifying anomalies. Both rely on the discrepancy between the model's output and the actual input data as an indicator of abnormality. However, they diverge in how they handle data and in their areas of application. Reconstruction-based methods focus on learning the underlying distribution of normal data. Once trained, the model attempts to recreate the input data, and the reconstruction error, measured as the difference between the original data and its reconstruction, serves as a key indicator of anomalies. A high reconstruction error suggests that the data deviates from the normal patterns learned by the model. This approach is particularly effective in cases where understanding the full structure or distribution of the data is crucial, such as in image-based AD or other high-dimensional datasets. In contrast, prediction-based methods focus on forecasting specific attributes or missing values from the data, rather than reconstructing the entire input. These methods typically predict future values or infer missing data points by leveraging known features. If the predicted values significantly deviate from the actual values, this signals a potential anomaly. Prediction-based methods are often better suited to feature-rich datasets, where predicting specific variables can help identify irregular patterns. For instance, in applications like fraud detection, predicting expected behaviors or transactions can reveal anomalies when the predicted outcomes differ from the observed ones. While the two families differ in their data processing approaches, they can be highly complementary. In many cases, combining reconstruction-based and prediction-based techniques within a hybrid framework allows for more robust AD: reconstruction models capture the overall structure and patterns in the data, while prediction models focus on detecting deviations in specific variables or features. This combination can provide a more comprehensive solution for identifying anomalies in complex datasets across various domains. Tang et al. [152] utilize a U-Net module as the prediction module to perform future frame prediction, amplifying reconstruction errors for abnormal events, while another U-Net module is used as the reconstruction module to enhance predicted frames for normal events, thus improving the effectiveness of AD. Lv et al. [31] adopt a dilated convolution-based autoencoder to integrate prediction errors and reconstruction errors into the output anomaly scores, effectively improving the generalization capability of the detection model. Liu et al. [153] leverage a reconstruction model and a prediction model within an end-to-end semi-supervised AD framework to effectively capture inter-variable correlations and temporal dependencies in multivariate time series data from wind turbines. Additionally, by incorporating an auxiliary discriminator with adversarial training, the model can progressively improve performance using limited labeled data, enhancing the transition from unsupervised to supervised AD. Wei et al. [154] propose a hybrid deep-learning model combining an LSTM and an autoencoder for AD in indoor air quality data, where the LSTM captures long-term dependencies in time-series data and the autoencoder uses reconstruction loss to detect anomalies, effectively addressing both temporal correlations and reconstruction errors for improved detection accuracy.
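A common way to realize such a hybrid, sketched below under stated assumptions, is to fuse the two error signals into a single score. The weighting scheme, the generic `reconstructor`/`forecaster` interfaces, and the threshold are illustrative, not the design of any cited paper:

```python
# Minimal sketch of fusing reconstruction and prediction errors into one
# anomaly score; the weighting and interfaces are illustrative assumptions.
import torch

def hybrid_score(x_window, x_next, reconstructor, forecaster, lam=0.5):
    """Weighted sum of window reconstruction error and next-step
    prediction error; lam balances the two criteria."""
    with torch.no_grad():
        recon_err = ((x_window - reconstructor(x_window)) ** 2).mean(dim=(1, 2))
        pred_err = ((x_next - forecaster(x_window)) ** 2).mean(dim=1)
    return lam * recon_err + (1.0 - lam) * pred_err

# Usage with any trained pair of modules mapping
# (batch, w, n_vars) -> (batch, w, n_vars) and (batch, w, n_vars) -> (batch, n_vars):
# scores = hybrid_score(x_window, x_next, ae_model, lstm_model)
# anomalies = scores > tau
```

The balance parameter `lam` reflects the trade-off discussed above: values near 1 emphasize global structure (reconstruction), while values near 0 emphasize deviations in forecast variables (prediction).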
# D. Summary and Insights

This section introduces three types of deep learning-based AD methods: reconstruction-based, prediction-based, and hybrid approaches. Reconstruction-based methods are particularly effective in handling high-dimensional and unsupervised data by learning intrinsic patterns and identifying deviations through reconstruction errors. Prediction-based methods excel at modeling temporal dependencies in time-series data, enabling the detection of unexpected patterns in dynamic environments. Hybrid approaches combine these strengths to address complex scenarios where multiple anomaly types coexist. Notably, these methods demonstrate the power of deep learning in capturing intricate patterns and dependencies that traditional methods often miss, making them indispensable for tackling diverse and challenging AD tasks.

# V. INTEGRATING TRADITIONAL AND DEEP LEARNING METHODS

In the field of AD, traditional methods and deep learning approaches each offer unique advantages. Traditional methods, such as clustering [155] and Support Vector Data Description [156], are often simpler, more interpretable, and computationally efficient. These methods excel in providing transparent decision-making processes, making them suitable for applications where model interpretability is crucial. On the other hand, deep learning methods, with their ability to model complex, high-dimensional data distributions, offer enhanced detection accuracy and adaptability, especially for large datasets and unstructured data like images and sequences.

The integration of traditional and deep learning methods aims to combine the interpretability and simplicity of traditional methods with the robustness and flexibility of deep learning techniques. By combining these approaches, researchers seek to create hybrid models that maintain accuracy while offering insights into the underlying decision-making process, improving both detection power and model transparency.

# A. Clustering method

Clustering models play a crucial role in unsupervised AD, particularly for textual data. These models group similar data points based on their proximity in feature space and identify anomalies as points that deviate from established clusters [157]. Common clustering techniques, such as k-means [158], Density-Based Spatial Clustering of Applications with Noise (DBSCAN) [159], and hierarchical clustering [160], work effectively for simpler datasets and offer the advantage of interpretability. By integrating clustering methods with deep learning, for example by applying clustering to features extracted by a neural network, it is possible to improve detection accuracy while maintaining an interpretable clustering structure. This hybrid approach is particularly useful in cases where the data distribution varies and flexible, context-aware AD is required. For instance, Li et al. [161] propose a method that extends fuzzy clustering with a reconstruction criterion and Particle Swarm Optimization (PSO) to detect anomalies in both amplitude and shape. This highlights how traditional clustering methods can benefit from optimization techniques to handle diverse anomaly types.
Similarly, Markovitz et al. [162] introduce an innovative approach for AD in human actions by working directly on human pose graphs extracted from video sequences. By mapping these graphs to a latent space, clustering them, and applying a Dirichlet process-based mixture model, the method effectively leverages probabilistic modeling to enhance the robustness and flexibility of clustering for action recognition. In video AD, Qiu et al. [163] propose a convolution-enhanced self-attentive video auto-encoder integrated with a dual-scale clustering module based on the k-means algorithm. This approach effectively distinguishes normal and abnormal video data by enhancing feature representations and addressing the fuzzy boundaries between them. Additionally, Peng et al. [33] introduce a multivariate ELM-MI framework combined with a dynamic kernel selection method. By employing hierarchical clustering on unlabeled data to determine kernels, this method enables unsupervised online detection of various anomaly types, including point and group anomalies, while reducing computational costs and improving robustness. These studies collectively highlight the potential of hybrid approaches that integrate clustering with advanced techniques like deep learning, probabilistic modeling, or optimization frameworks. Such methods leverage the interpretability and simplicity of traditional clustering while addressing its limitations in handling complex data, offering a promising pathway for accurate and flexible AD.
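The "clustering on deep features" pattern described above can be sketched in a few lines: the distance to the nearest k-means centroid serves as the anomaly score. The encoder is assumed to be any pretrained feature extractor, and all names, dimensions, and the percentile threshold are illustrative assumptions:

```python
# Minimal sketch of clustering-based scoring on deep features; the
# feature arrays stand in for encoder outputs and are illustrative.
import numpy as np
from sklearn.cluster import KMeans

rng = np.random.default_rng(0)
features_train = rng.normal(size=(500, 16))   # encoder outputs, normal data
features_test = rng.normal(size=(20, 16))     # encoder outputs, test data

km = KMeans(n_clusters=5, n_init=10, random_state=0).fit(features_train)

def nearest_centroid_dist(feats, centers):
    """Distance from each point to its closest cluster centroid."""
    return np.linalg.norm(
        feats[:, None, :] - centers[None, :, :], axis=2
    ).min(axis=1)

# Calibrate the threshold on normal data, then flag distant test points.
tau = np.percentile(nearest_centroid_dist(features_train, km.cluster_centers_), 99)
is_anomaly = nearest_centroid_dist(features_test, km.cluster_centers_) > tau
```

Because each alarm is tied to a specific centroid and distance, this construction retains the interpretability of the clustering structure while benefiting from the learned feature space.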
# B. Normalizing Flows

Normalizing Flows (NF) [164] offer a probabilistic framework for AD by estimating the probability distribution of data. Using a sequence of invertible transformations, NFs can model complex distributions, making them particularly effective for identifying anomalies as low-probability events. When integrated with deep learning models, such as CNNs or RNNs, NFs act as precise probabilistic estimators, complementing the feature extraction capabilities of deep networks. This hybrid framework enhances AD, particularly on high-dimensional or unstructured datasets.

For instance, Yu et al. [165] propose FastFlow, a 2D normalizing flow module integrated with deep feature extractors like ResNet and Vision Transformers. By effectively modeling feature distributions and capturing both local and global relationships, FastFlow achieves state-of-the-art performance, with a $99.4\%$ AUC on the MVTec AD dataset, while maintaining high inference efficiency. Similarly, Cho et al. [166] introduce the Implicit Two-path Autoencoder (ITAE), which reconstructs normal video patterns by implicitly modeling appearance and motion features through two encoders and a shared decoder. NF enhances ITAE by estimating the density of normal embeddings, enabling robust detection of out-of-distribution anomalies, with strong results across six surveillance benchmarks. For multivariate time series data, Zhou et al. [167] combine a graph structure learning model with entity-aware normalizing flows to capture interdependencies and evolving relations among entities. By estimating entity-specific densities and employing a clustering strategy for similar entities, the extended MTGFlow_cluster improves density estimation accuracy, demonstrating superior performance on six benchmark datasets. Further expanding on the use of graphs, Dai et al. [168] propose the Graph-Augmented Normalizing Flow (GANF), which incorporates a Bayesian network to model causal relationships among time series. This approach factorizes joint probabilities into conditional probabilities, improving density estimation and enabling effective detection of anomalies in low-density regions, as well as identifying distribution drifts.

These studies collectively highlight the strengths of integrating Normalizing Flows with traditional and deep learning-based methods. By combining the interpretability and precision of probabilistic models with the expressive power of deep networks or graph structures, these hybrid approaches address the challenges of complex data distributions, offering scalable and robust solutions for diverse AD tasks. This synergy underscores the potential of such methods to push the boundaries of accuracy and adaptability in real-world applications.

# C. Support Vector Data Description

Support Vector Data Description (SVDD) [156] is a traditional machine learning method used to define a boundary around normal data points, effectively distinguishing them from anomalies. Unlike binary classification, SVDD is particularly effective for one-class classification tasks, where only normal data is available. This approach is computationally efficient and interpretable, as it provides a clear boundary between normal and abnormal points. By integrating SVDD with deep learning, researchers can enhance the boundary definition based on high-dimensional features extracted by a neural network, resulting in a model that combines the boundary precision of SVDD with the feature richness of deep learning. This hybrid model is highly effective in scenarios where boundary clarity and interpretability are paramount, such as in industrial monitoring or fraud detection.

To improve latent representations, Zhou et al. [169] propose Deep SVDD-VAE, which jointly optimizes a VAE and SVDD. The VAE reconstructs input data, and SVDD simultaneously defines a spherical boundary in the latent space, ensuring separability of normal and anomalous instances. This joint optimization significantly outperforms traditional AE-based methods, as shown on the MNIST, CIFAR-10, and GTSRB datasets. For variable-length time series data, Ergen et al. [124] introduce an LSTM-based AD framework, where the LSTM and SVDD are jointly optimized using modified objectives. This method extends seamlessly to GRU architectures, demonstrating strong performance across unsupervised, semi-supervised, and supervised settings. In addition, Zhang et al. [170] propose Deep Structure Preservation SVDD (DSPSVDD), which simultaneously minimizes the hypersphere volume and the network reconstruction error. This dual objective ensures deep feature preservation and enhances AD performance, outperforming traditional SVDD models on datasets like MNIST and MVTec AD.

These studies highlight the strengths of combining SVDD with deep learning, where deep models enhance feature representation while SVDD ensures boundary precision. This hybrid framework effectively addresses limitations in both methods, offering a scalable and interpretable solution for complex AD tasks across diverse domains.

# D. Summary and Insights

This section explores the integration of traditional and deep learning methods for AD, highlighting how their complementary strengths can be combined. Traditional methods, known for their simplicity, interpretability, and computational efficiency, excel in scenarios where transparency is critical. In contrast, deep learning methods offer superior adaptability
By integrating these approaches, hybrid models can leverage the interpretability of traditional methods while retaining the robustness and flexibility of deep learning. This fusion not only enhances AD performance but also bridges the gap between accuracy and model transparency, making it a promising direction for future research. + +# VI. OPEN ISSUES AND FUTURE WORKS + +# A. Data Collection + +Data scarcity and class imbalance remain major challenges in AD. Since anomalies are rare, obtaining large labeled datasets is costly and time-consuming, especially when expert annotation is required. Supervised learning struggles due to the lack of abnormal samples, while the overwhelming presence of normal data biases models toward common patterns. This problem is particularly critical in cybersecurity, healthcare, and industrial monitoring, where undetected anomalies can have serious consequences. + +Several approaches mitigate these issues. Semi-supervised and unsupervised learning exploit normal data distributions to detect deviations without requiring labeled anomalies [171] [172]. Data augmentation, synthetic data generation, and oversampling improve data balance by increasing the number of anomalous examples, helping models generalize better [173] [174]. Despite these advancements, challenges remain. Semi-supervised methods struggle with subtle anomalies that closely resemble normal data. Augmentation techniques, often based on simple transformations, may fail to capture complex domain-specific variations. Similarly, synthetic data generation may not fully reflect real-world anomaly diversity, leading to models biased toward normal samples. Moreover, even with augmentation, models risk overfitting to the majority class, compromising anomaly detection performance. Ensuring that models remain sensitive to rare anomalies while maintaining accuracy on normal data remains an ongoing challenge. Future research may focus on refining self-supervised learning [175], improving the diversity of synthetic samples [176], and developing more adaptive anomaly detection frameworks to enhance robustness in real-world applications. + +# B. Computational Complexity + +In AD, computational complexity is a crucial factor, especially for systems operating in real-time environments or handling large-scale datasets. The efficiency of an algorithm directly impacts its feasibility in fields like industrial monitoring, cybersecurity, and autonomous systems, where swift detection is essential. Many advanced models, particularly deep learning approaches like autoencoders, GANs, and LSTMs, are computationally intensive due to their complex architectures and iterative learning processes. This often leads to trade-offs between detection accuracy and computational efficiency, with continuous efforts aimed at optimizing models to reduce computational demands without sacrificing performance. + +Moreover, AD models frequently require substantial memory resources, especially when dealing with high-dimensional + +or streaming data, making memory usage a crucial consideration. Techniques like memory-efficient architectures, data compression, and sparse modeling are commonly used to address this issue. Real-time AD adds further complexity, as algorithms must process incoming data and make rapid decisions in applications like autonomous driving and fraud detection [177], where even minimal delays can have severe consequences. 
# B. Computational Complexity

In AD, computational complexity is a crucial factor, especially for systems operating in real-time environments or handling large-scale datasets. The efficiency of an algorithm directly affects its feasibility in fields such as industrial monitoring, cybersecurity, and autonomous systems, where swift detection is essential. Many advanced models, particularly deep learning approaches such as autoencoders, GANs, and LSTMs, are computationally intensive due to their complex architectures and iterative learning processes. This often leads to trade-offs between detection accuracy and computational efficiency, with continuous efforts aimed at optimizing models to reduce computational demands without sacrificing performance.

Moreover, AD models frequently require substantial memory resources, especially when dealing with high-dimensional or streaming data, making memory usage a crucial consideration. Techniques such as memory-efficient architectures, data compression, and sparse modeling are commonly used to address this issue. Real-time AD adds further complexity, as algorithms must process incoming data and make rapid decisions in applications such as autonomous driving and fraud detection [177], where even minimal delays can have severe consequences. Achieving real-time performance typically involves optimizing data processing speeds and decision-making through lightweight models [178], [179] and parallel processing techniques such as GPU acceleration [180]. However, balancing real-time detection capabilities with high accuracy remains challenging.

The tension between computational complexity and detection accuracy persists, as complex models often excel at detection but lack practical applicability in real-time or large-scale scenarios, while simpler models, though computationally efficient, may fail to detect nuanced anomalies. Hybrid models or multi-stage frameworks that deploy complex methods only when needed provide a potential solution. Additionally, future research may benefit from exploring distributed computing solutions, such as cloud [181] or edge computing, to enhance real-time AD performance in resource-limited environments.

# C. Explainability and Interpretability

Deep learning methods have greatly advanced AD by capturing complex patterns in high-dimensional data. However, they are often criticized as "black-box" models due to their lack of transparency, which makes it difficult to understand why certain data points are flagged as anomalies. In fields such as healthcare, finance, or industrial monitoring, accurate detection alone is insufficient; stakeholders also need clear explanations of why a particular anomaly was detected. This lack of interpretability limits the practical deployment of deep learning models, as the inability to justify decisions reduces trust and hinders adoption in critical applications.

In fields such as healthcare, where anomalies may be linked to medical diagnoses, or finance, where fraud detection can carry legal implications, interpretability is essential: transparent model decisions enable experts to validate results and make informed decisions. In safety-critical applications, such as autonomous driving or industrial equipment monitoring, understanding the rationale behind AD is vital for ensuring safety. One major challenge is the trade-off between model interpretability and performance. Simpler models, such as decision trees or linear regression, offer greater transparency but often lack the capacity to detect subtle anomalies in high-dimensional data; deep learning models, in contrast, provide high accuracy but are harder to interpret.

Ongoing research is exploring hybrid approaches in which interpretable models are combined with more complex ones, allowing accurate AD with the added benefit of interpretability. For example, attention mechanisms [182] in neural networks can highlight the specific data regions influencing a decision, providing insight into the model's internal workings. Alternatively, tools such as Local Interpretable Model-agnostic Explanations (LIME) and SHapley Additive exPlanations (SHAP) [2] can offer post-hoc explanations, improving transparency without altering the model structure. Future research could also focus on real-time explainability in time-sensitive applications and on incorporating domain knowledge or user feedback to enhance model interpretability.
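The sketch below shows the post-hoc idea in its simplest form: a model-agnostic, occlusion-style attribution that scores each feature of a flagged sample by how much the anomaly score drops when that feature is replaced with a background value. It is a toy stand-in for LIME/SHAP [2], not those algorithms themselves, and the distance-based detector and median background are assumptions made for the example.

```python
import numpy as np

def feature_attributions(score_fn, x, background):
    """score_fn maps a 1-D feature vector to a scalar anomaly score.
    attr[j] > 0 means feature j pushed the score upward (toward 'anomalous')."""
    base = score_fn(x)
    attr = np.zeros(len(x))
    for j in range(len(x)):
        x_masked = x.copy()
        x_masked[j] = background[j]          # neutralize feature j
        attr[j] = base - score_fn(x_masked)  # score change caused by feature j
    return attr

def distance_score(v, mu):
    # Toy detector: distance to the training mean as the anomaly score.
    return float(np.linalg.norm(v - mu))

rng = np.random.default_rng(0)
train = rng.normal(size=(500, 5))
mu, med = train.mean(axis=0), np.median(train, axis=0)

x_flagged = np.array([0.1, 4.0, -0.2, 0.0, 0.3])  # feature 1 is the outlier
attr = feature_attributions(lambda v: distance_score(v, mu), x_flagged, med)
print(attr)  # the largest attribution points at feature 1
```

Even a crude per-feature report of this kind ("this one channel accounts for most of the score") is often what the stakeholders described above need before they will act on a flagged anomaly.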
# D. Handling Diverse Types of Anomalies

In real-world AD, multiple types of anomalies often coexist, adding complexity to the detection process. Beyond point anomalies, which are the simplest, other types such as contextual and collective anomalies are common, especially in dynamic environments. For instance, in intelligent transportation systems, anomalies may include both isolated incidents (e.g., a single vehicle's sudden deceleration) and collective patterns (e.g., multiple vehicles simultaneously slowing down), each requiring different detection methods. Effectively capturing these varied anomaly types requires flexible models capable of adapting to different anomaly patterns rather than specializing in only one type.

Continuous research is needed to develop models that generalize across anomaly types, enhancing adaptability while balancing detection accuracy with model flexibility. Hybrid approaches, for instance, can integrate different methods to capture diverse anomalies more effectively; the challenge remains to achieve this versatility without sacrificing accuracy, as models must maintain strong performance across different contexts. Future work may also explore multi-modal models [183] that combine different types of data, further improving detection capabilities by drawing on diverse data sources. These directions aim to create AD systems that are both robust and adaptable, capable of handling the complex and mixed nature of real-world anomaly scenarios.
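The difference between the two anomaly types is easy to demonstrate on a univariate stream: a z-score test flags individually extreme points, while a test on sliding-window means flags a group of individually unremarkable points that shift together. Window size, thresholds, and the injected shift below are illustrative assumptions.

```python
import numpy as np

def point_anomalies(x, z_thresh=3.0):
    # Flag individual values that are extreme relative to the whole stream.
    z = (x - x.mean()) / x.std()
    return np.flatnonzero(np.abs(z) > z_thresh)

def collective_anomalies(x, win=20, z_thresh=3.0):
    # A window mean has standard deviation sigma / sqrt(win), so a modest
    # shift shared by a whole window is detectable even when no single
    # point stands out.
    means = np.convolve(x, np.ones(win) / win, mode="valid")
    z = (means - x.mean()) / (x.std() / np.sqrt(win))
    return np.flatnonzero(np.abs(z) > z_thresh)  # window start indices

rng = np.random.default_rng(1)
x = rng.normal(0.0, 1.0, size=500)
x[250:270] += 1.5  # collective shift: each point is individually plausible

print(point_anomalies(x))       # at most a few isolated points exceed 3 sigma
print(collective_anomalies(x))  # flags the windows overlapping indices 250-270
```

Because the standard deviation of a window mean scales as sigma/sqrt(win), the injected 1.5-sigma shift becomes roughly a 6.7-sigma event at win = 20; this is exactly the "multiple vehicles simultaneously slowing down" pattern described above, which a detector tuned only for point anomalies would miss.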
# VII. CONCLUSION

In this survey, we have provided a comprehensive overview of recent advancements in AD, with a primary focus on deep learning techniques from 2019 to 2024. By analyzing over 180 research papers from leading journals and conferences, we have explored how AD methods have evolved to address diverse challenges across various types of data. This survey categorizes deep learning methods into reconstruction-based, prediction-based, and hybrid approaches, and examines their strengths, limitations, and applications. Recognizing the simplicity, interpretability, and computational efficiency of traditional AD methods, we also reviewed their integration with deep learning techniques; these hybrid approaches aim to leverage the strengths of both paradigms, enhancing the robustness and efficiency of AD systems. This survey not only sheds light on state-of-the-art techniques but also identifies gaps and opportunities for future research. By focusing on the latest trends and innovations, this work aims to inspire further exploration and advancement in the rapidly evolving field of AD.

# REFERENCES

[1] L. Ruff, J. R. Kauffmann, R. A. Vandermeulen, G. Montavon, W. Samek, M. Kloft, T. G. Dietterich, and K.-R. Müller, "A unifying review of deep and shallow anomaly detection," Proc. IEEE, vol. 109, no. 5, pp. 756–795, 2021.
[2] V. Vimbi, N. Shaffi, and M. Mahmud, "Interpreting artificial intelligence models: a systematic review on the application of lime and shap in alzheimer's disease detection," Brain Informatics, vol. 11, no. 1, p. 10, 2024.
[3] F. Al-Turjman, H. Zahmatkesh, and R. Shahroze, "An overview of security and privacy in smart cities' IoT communications," Trans. Emerg. Telecommun. Technol., vol. 33, no. 3, p. e3677, 2022.
[4] Y. A. Qadri, A. Nauman, Y. B. Zikria, A. V. Vasilakos, and S. W. Kim, "The future of healthcare internet of things: a survey of emerging technologies," IEEE Commun. Surv. Tutor., vol. 22, no. 2, pp. 1121-1167, 2020.
[5] M. Humayun, N. Jhanjhi, B. Hamid, and G. Ahmed, "Emerging smart logistics and transportation using IoT and blockchain," IEEE Internet Things Mag., vol. 3, no. 2, pp. 58–62, 2020.
[6] S. H. Haji and S. Y. Ameen, "Attack and anomaly detection in IoT networks using machine learning techniques: A review," Asian J. Res. Comput. Sci., vol. 9, no. 2, pp. 30-46, 2021.
[7] V. Mothukuri, P. Khare, R. M. Parizi, S. Pouriyeh, A. Dehghantanha, and G. Srivastava, "Federated-learning-based anomaly detection for IoT security attacks," IEEE Internet Things J., vol. 9, no. 4, pp. 2545-2554, 2021.
[8] S. A. Al Mamun and J. Valimaki, "Anomaly detection and classification in cellular networks using automatic labeling technique for applying supervised learning," Procedia Comput. Sci., vol. 140, pp. 186-195, 2018.
[9] M. E. Villa-Pérez, M. A. Alvarez-Carmona, O. Loyola-Gonzalez, M. A. Medina-Pérez, J. C. Velazco-Rossell, and K.-K. R. Choo, "Semisupervised anomaly detection algorithms: A comparative summary and future research directions," Knowledge-Based Systems, vol. 218, p. 106878, 2021.
[10] G. Michau and O. Fink, "Unsupervised transfer learning for anomaly detection: Application to complementary operating condition transfer," Knowledge-Based Systems, vol. 216, p. 106816, 2021.
[11] Y. Liang, J. Zhang, S. Zhao, R. Wu, Y. Liu, and S. Pan, "Omni-frequency channel-selection representations for unsupervised anomaly detection," IEEE Trans. Image Process., 2023.
[12] B. Siegel, "Industrial anomaly detection: A comparison of unsupervised neural network architectures," IEEE Sens. Lett., vol. 4, no. 8, pp. 1-4, 2020.
[13] P. Bergmann, M. Fauser, D. Sattlegger, and C. Steger, "Mvtec ad-a comprehensive real-world dataset for unsupervised anomaly detection," in Proc. IEEE/CVF Conf. Comput. Vis. Pattern Recognit., 2019, pp. 9592-9600.
[14] S. Schmidl, P. Wenig, and T. Papenbrock, "Anomaly detection in time series: a comprehensive evaluation," Proc. VLDB Endow., vol. 15, no. 9, pp. 1779-1797, 2022.
[15] S. Zhai, Y. Cheng, W. Lu, and Z. Zhang, "Deep structured energy based models for anomaly detection," in Int. Conf. Mach. Learn. (ICML). PMLR, 2016, pp. 1100-1109.
[16] H. Sarmadi and A. Karamodin, "A novel anomaly detection method based on adaptive mahalanobis-squared distance and one-class knn rule for structural health monitoring under environmental effects," Mech. Syst. Signal Process., vol. 140, p. 106495, 2020.
[17] I. Syarif, A. Prugel-Bennett, and G. Wills, "Unsupervised clustering approach for network anomaly detection," in Netw. Digit. Technol., Int. Conf., NDT 2012, Proc., Part I. Springer, 2012, pp. 135–145.
[18] D. Samariya and A. Thakkar, "A comprehensive survey of anomaly detection algorithms," Ann. Data Sci., vol. 10, no. 3, pp. 829–850, 2023.
[19] G. Pang, C. Shen, L. Cao, and A. V. D. Hengel, "Deep learning for anomaly detection: A review," ACM Comput. Surv., vol. 54, no. 2, pp. 1-38, 2021.
[20] L. Bergman, N. Cohen, and Y. Hoshen, "Deep nearest neighbor anomaly detection," arXiv preprint arXiv:2002.10445, 2020.
[21] K. Leung and C. Leckie, "Unsupervised anomaly detection in network intrusion detection using clusters," in Proc. 28th Australas. Conf. Comput. Sci., vol. 38, 2005, pp. 333-342.
[22] H. Ringberg, A. Soule, J. Rexford, and C. Diot, "Sensitivity of pca for traffic anomaly detection," in Proc. 2007 ACM SIGMETRICS Int. Conf. Meas. Model. Comput. Syst., 2007, pp. 109-120.
[23] D. Kwon, H. Kim, J. Kim, S. C. Suh, I. Kim, and K. J. Kim, "A survey of deep learning-based network anomaly detection," Cluster Computing, vol. 22, pp. 949–961, 2019.
[24] A. Aldweesh, A. Derhab, and A. Z. Emam, "Deep learning approaches for anomaly-based intrusion detection systems: A survey, taxonomy, and open issues," Knowl.-Based Syst., vol. 189, p. 105124, 2020.
[25] L. Li, J. Yan, H. Wang, and Y.
Jin, "Anomaly detection of time series with smoothness-inducing sequential variational auto-encoder," IEEE Trans. Neural Netw. Learn. Syst., vol. 32, no. 3, pp. 1177-1191, 2020. +[26] G. Harshvardhan, M. K. Gourisaria, M. Pandey, and S. S. Rautaray, "A comprehensive survey and analysis of generative models in machine learning," Comput. Sci. Rev., vol. 38, p. 100285, 2020. +[27] B. Nachman and D. Shih, "Anomaly detection with density estimation," Phys. Rev. D, vol. 101, no. 7, p. 075042, 2020. +[28] A. B. Nassif, M. A. Talib, Q. Nasir, and F. M. Dakalbab, "Machine learning for anomaly detection: A systematic review," IEEE Access, vol. 9, pp. 78658-78700, 2021. +[29] X. Ma, J. Wu, S. Xue, J. Yang, C. Zhou, Q. Z. Sheng, H. Xiong, and L. Akoglu, “A comprehensive survey on graph anomaly detection with deep learning,” IEEE Trans. Knowl. Data Eng., vol. 35, no. 12, pp. 12012–12038, 2021. +[30] X. Xia, X. Pan, N. Li, X. He, L. Ma, X. Zhang, and N. Ding, “Gan-based anomaly detection: A review,” Neurocomputing, vol. 493, pp. 497-535, 2022. +[31] J. Lv, Y. Wang, and S. Chen, "Adaptive multivariate time-series anomaly detection," Inf. Process. Manag., vol. 60, no. 4, p. 103383, 2023. +[32] M. Y. I. Basheer, A. M. Ali, N. H. A. Hamid, M. A. M. Ariffin, R. Osman, S. Nordin, and X. Gu, "Autonomous anomaly detection for streaming data," Knowledge-Based Systems, vol. 284, p. 111235, 2024. +[33] X. Peng, H. Li, F. Yuan, S. G. Razul, Z. Chen, and Z. Lin, "An extreme learning machine for unsupervised online anomaly detection in multivariate time series," Neurocomputing, vol. 501, pp. 596-608, 2022. +[34] Y. Choi, H. Lim, H. Choi, and I.-J. Kim, "Gan-based anomaly detection and localization of multivariate time series data for power plant," in Proc. 2020 IEEE Int. Conf. Big Data Smart Comput. (BigComp). IEEE, 2020, pp. 71-74. +[35] H.-T. Duong, V.-T. Le, and V. T. Hoang, "Deep learning-based anomaly detection in video surveillance: a survey," Sensors, vol. 23, no. 11, p. 5024, 2023. +[36] S. Thudumu, P. Branch, J. Jin, and J. Singh, "A comprehensive survey of anomaly detection techniques for high dimensional big data," Journal of Big Data, vol. 7, pp. 1-30, 2020. +[37] I. Souiden, M. N. Omri, and Z. Brahmi, “A survey of outlier detection in high dimensional data streams,” Comput. Sci. Rev., vol. 44, p. 100463, 2022. +[38] Q. Ding and E. D. Kolaczyk, “A compressed pca subspace method for anomaly detection in high-dimensional data,” IEEE Trans. Inf. Theory, vol. 59, no. 11, pp. 7419–7433, 2013. +[39] M. Sakurada and T. Yairi, "Anomaly detection using autoencoders with nonlinear dimensionality reduction," in Proc. MLSDA 2014 2nd Workshop Mach. Learn. Sensory Data Anal., 2014, pp. 4-11. +[40] T. Cheng and B. Wang, "Total variation and sparsity regularized decomposition model with union dictionary for hyperspectral anomaly detection," IEEE Trans. Geosci. Remote Sens., vol. 59, no. 2, pp. 1472-1486, 2020. +[41] L. Li, W. Li, Q. Du, and R. Tao, "Low-rank and sparse decomposition with mixture of gaussian for hyperspectral anomaly detection," IEEE Trans. Cybern., vol. 51, no. 9, pp. 4363-4372, 2021. +[42] S. Han and S. S. Woo, “Learning sparse latent graph representations for anomaly detection in multivariate time series,” in Proc. 28th ACM SIGKDD Conf. Knowl. Discov. Data Min., 2022, pp. 2977–2986. +[43] X. Ma and W. Shi, “Aesmote: Adversarial reinforcement learning with smote for anomaly detection,” IEEE Trans. Netw. Sci. Eng., vol. 8, no. 2, pp. 943–956, 2021. +[44] M. Kim, E. Ou, P.-L. Loh, T. Allen, R. 
Agasie, and K. Liu, "Rnn-based online anomaly detection in nuclear reactors for highly imbalanced datasets with uncertainty," Nucl. Eng. Des., vol. 364, p. 110699, 2020.
[45] G. Dlamini and M. Fahim, "Dgm: a data generative model to improve minority class presence in anomaly detection domain," Neural Comput. Appl., vol. 33, pp. 13635–13646, 2021.
[46] S. Han, X. Hu, H. Huang, M. Jiang, and Y. Zhao, "Adbench: Anomaly detection benchmark," Adv. Neural Inf. Process. Syst., vol. 35, pp. 32142-32159, 2022.
[47] Y. Zhang, Y. Chen, J. Wang, and Z. Pan, "Unsupervised deep anomaly detection for multi-sensor time-series signals," IEEE Trans. Knowl. Data Eng., vol. 35, no. 2, pp. 2118-2132, 2023.
[48] D. Chen, L. Yue, X. Chang, M. Xu, and T. Jia, "Nm-gan: Noise-modulated generative adversarial network for video anomaly detection," Pattern Recognition, vol. 116, p. 107969, 2021.
[49] M. U. Hassan, M. H. Rehmani, and J. Chen, "Anomaly detection in blockchain networks: A comprehensive survey," IEEE Commun. Surv. Tutor., vol. 25, no. 1, pp. 289-318, 2022.
[50] Y. Liu, S. Garg, J. Nie, Y. Zhang, Z. Xiong, J. Kang, and M. S. Hossain, "Deep anomaly detection for time-series data in industrial IoT: A communication-efficient on-device federated learning approach," IEEE Internet Things J., vol. 8, no. 8, pp. 6348-6358, 2020.
[51] M. J. Idrissi, H. Alami, A. El Mahdaouy, A. El Mekki, S. Oualil, Z. Yartaoui, and I. Berrada, "Fed-anids: Federated learning for anomaly-based network intrusion detection systems," Expert Syst. Appl., vol. 234, p. 121000, 2023.
[52] L. Cui, Y. Qu, G. Xie, D. Zeng, R. Li, S. Shen, and S. Yu, "Security and privacy-enhanced federated learning for anomaly detection in IoT infrastructures," IEEE Trans. Ind. Inform., vol. 18, no. 5, pp. 3492-3500, 2022.
[53] X. Wang, J. Liu, T. Qiu, C. Mu, C. Chen, and P. Zhou, "A real-time collision prediction mechanism with deep learning for intelligent transportation system," IEEE Trans. Veh. Technol., vol. 69, no. 9, pp. 9497-9508, 2020.
[54] G. Li, T.-H. Nguyen, and J. J. Jung, "Traffic incident detection based on dynamic graph embedding in vehicular edge computing," Appl. Sci., vol. 11, no. 13, p. 5861, 2021.
[55] G. Li and J. J. Jung, "Deep learning for anomaly detection in multivariate time series: Approaches, applications, and challenges," Inf. Fusion, vol. 91, pp. 93-102, 2023.
[56] C. Zhao, X. Chang, T. Xie, H. Fujita, and J. Wu, "Unsupervised anomaly detection based method of risk evaluation for road traffic accident," Appl. Intell., vol. 53, no. 1, pp. 369-384, 2023.
[57] S. Li, A. Pandey, B. Hooi, C. Faloutsos, and L. Pileggi, "Dynamic graph-based anomaly detection in the electrical grid," IEEE Trans. Power Syst., vol. 37, no. 5, pp. 3408-3422, 2022.
[58] X. Wang and S.-H. Ahn, "Real-time prediction and anomaly detection of electrical load in a residential community," Appl. Energy, vol. 259, p. 114145, 2020.
[59] I. Siniosoglou, P. Radoglou-Grammatikis, G. Efstathopoulos, P. Fouliras, and P. Sarigiannidis, "A unified deep learning anomaly detection and classification approach for smart grid environments," IEEE Trans. Netw. Serv. Manag., vol. 18, no. 2, pp. 1137-1151, 2021.
[60] T. Fernando, H. Gammulle, S. Denman, S. Sridharan, and C. Fookes, "Deep learning for medical anomaly detection-a survey," ACM Comput. Surv., vol. 54, no. 7, pp. 1-37, 2021.
[61] E. Šabić, D. Keeley, B. Henderson, and S. Nannemann, "Healthcare and anomaly detection: using machine learning to predict anomalies in heart rate data," AI & Society, vol.
36, no. 1, pp. 149–158, 2021. +[62] K. G. Al-Hashedi and P. Magalingam, “Financial fraud detection applying data mining techniques: A comprehensive review from 2009 to 2019,” Comput. Sci. Rev., vol. 40, p. 100402, 2021. +[63] W. Hilal, S. A. Gadsden, and J. Yawney, "Financial fraud: a review of anomaly detection techniques and recent advances," Expert Syst. Appl., vol. 193, p. 116429, 2022. +[64] H. Fujita, A. Gaeta, V. Loia, and F. Orciuoli, “Resilience analysis of critical infrastructures: A cognitive approach based on granular computing,” IEEE Trans. Cybern., vol. 49, no. 5, pp. 1835–1848, 2019. +[65] V. K. Singh and M. Govindarasu, “A cyber-physical anomaly detection for wide-area protection using machine learning,” IEEE Trans. Smart Grid, vol. 12, no. 4, pp. 3514–3526, 2021. +[66] S. M. Nagarajan, G. G. Deverajan, A. K. Bashir, R. P. Mahapatra, and M. S. Al-Numay, "TADF-cps: Intelligent anomaly detection framework towards cyber physical systems," Comput. Commun., vol. 188, pp. 81–89, 2022. +[67] T. Nakao, S. Hanaoka, Y. Nomura, M. Murata, T. Takenaga, S. Miki, T. Watadani, T. Yoshikawa, N. Hayashi, and O. Abe, "Unsupervised deep anomaly detection in chest radiographs," J. Digit. Imaging, vol. 34, pp. 418-427, 2021. +[68] W. H. Pinaya, P.-D. Tudosiu, R. Gray, G. Rees, P. Nachev, S. Ourselin, and M. J. Cardoso, "Unsupervised brain imaging 3d anomaly detection and segmentation with transformers," Med. Image Anal., vol. 79, p. 102475, 2022. +[69] L. Chen, Z. You, N. Zhang, J. Xi, and X. Le, “Utrad: Anomaly detection and localization with u-transformer,” Neural Networks, vol. 147, pp. 53–62, 2022. +[70] N. Shvetsova, B. Bakker, I. Fedulova, H. Schulz, and D. V. Dylov, "Anomaly detection in medical imaging with deep perceptual autoencoders," IEEE Access, vol. 9, pp. 118571-118583, 2021. +[71] R. L. Draelos, D. Dov, M. A. Mazurowski, J. Y. Lo, R. Henao, G. D. Rubin, and L. Carin, "Machine-learning-based multiple abnormality + +prediction with large-scale chest computed tomography volumes," Med. Image Anal., vol. 67, p. 101857, 2021. +[72] N. Shvetsova, B. Bakker, I. Fedulova, H. Schulz, and D. V. Dylov, "Anomaly detection in medical imaging with deep perceptual autoencoders," IEEE Access, vol. 9, pp. 118571-118583, 2021. +[73] H. Zhao, Y. Li, N. He, K. Ma, L. Fang, H. Li, and Y. Zheng, "Anomaly detection for medical images using self-supervised and translation-consistent features," IEEE Trans. Med. Imaging, vol. 40, no. 12, pp. 3641-3651, 2021. +[74] R. Nayak, U. C. Pati, and S. K. Das, “A comprehensive review on deep learning-based methods for video anomaly detection,” Image Vis. Comput., vol. 106, p. 104078, 2021. +[75] Y. Wang, T. Liu, J. Zhou, and J. Guan, "Video anomaly detection based on spatio-temporal relationships among objects," Neurocomputing, vol. 532, pp. 141-151, 2023. +[76] N. Li, F. Chang, and C. Liu, "Spatial-temporal cascade autoencoder for video anomaly detection in crowded scenes," IEEE Trans. Multimed., vol. 23, pp. 203-215, 2020. +[77] D. Chen, P. Wang, L. Yue, Y. Zhang, and T. Jia, “Anomaly detection in surveillance video based on bidirectional prediction,” Image Vis. Comput., vol. 98, p. 103915, 2020. +[78] M. H. Bhuyan, D. K. Bhattacharyya, and J. K. Kalita, “Network anomaly detection: methods, systems and tools,” IEEE Commun. Surv. Tutor., vol. 16, no. 1, pp. 303-336, 2013. +[79] S. Liu, B. Zhou, Q. Ding, B. Hooi, Z. Zhang, H. Shen, and X. Cheng, "Time series anomaly detection with adversarial reconstruction networks," IEEE Trans. Knowl. Data Eng., vol. 35, no. 
4, pp. 4293-4306, 2022. +[80] H. Cao, C. Tan, Z. Gao, Y. Xu, G. Chen, P-A. Heng, and S. Z. Li, “A survey on generative diffusion models,” IEEE Transactions on Knowledge and Data Engineering, 2024. +[81] I. Goodfellow, J. Pouget-Abadie, M. Mirza, B. Xu, D. Warde-Farley, S. Ozair, A. Courville, and Y. Bengio, "Generative adversarial networks," Commun. ACM, vol. 63, no. 11, pp. 139–144, 2020. +[82] L. Yang, Z. Zhang, Y. Song, S. Hong, R. Xu, Y. Zhao, W. Zhang, B. Cui, and M.-H. Yang, "Diffusion models: A comprehensive survey of methods and applications," ACM Comput. Surv., vol. 56, no. 4, pp. 1-39, 2023. +[83] S. Bond-Taylor, A. Leach, Y. Long, and C. G. Willcocks, “Deep generative modelling: A comparative review of vaes, gans, normalizing flows, energy-based and autoregressive models,” IEEE Trans. Pattern Anal. Mach. Intell., vol. 44, no. 11, pp. 7327-7347, 2021. +[84] S. Sheynin, S. Benaim, and L. Wolf, “A hierarchical transformation-discriminating generative model for few shot anomaly detection,” in Proc. IEEE/CVF Int. Conf. Comput. Vis., 2021, pp. 8495-8504. +[85] W. Lim, K. Y. S. Chek, L. B. Theng, and C. T. C. Lin, “Future of generative adversarial networks (gan) for anomaly detection in network security: A review,” Comput. Secur., p. 103733, 2024. +[86] X. Du, J. Chen, J. Yu, S. Li, and Q. Tan, "Generative adversarial nets for unsupervised outlier detection," Expert Syst. Appl., vol. 236, p. 121161, 2024. +[87] J. Wu, Z. Zhao, C. Sun, R. Yan, and X. Chen, “Fault-attention generative probabilistic adversarial autoencoder for machine anomaly detection,” IEEE Trans. Ind. Inf., vol. 16, no. 12, pp. 7479–7488, 2020. +[88] F. Dong, Y. Zhang, and X. Nie, "Dual discriminator generative adversarial network for video anomaly detection," IEEE Access, vol. 8, pp. 88170-88176, 2020. +[89] D. Guo, Z. Liu, and R. Li, "Regraphgan: A graph generative adversarial network model for dynamic network anomaly detection," Neural Networks, vol. 166, pp. 273-285, 2023. +[90] Y. Liu, Z. Li, C. Zhou, Y. Jiang, J. Sun, M. Wang, and X. He, "Generative adversarial active learning for unsupervised outlier detection," IEEE Trans. Knowl. Data Eng., vol. 32, no. 8, pp. 1517-1528, 2019. +[91] C. Liu, Z. Kong, S. Babu, C. Joslin, and J. Ferguson, "An integrated manifold learning approach for high-dimensional data feature extractions and its applications to online process monitoring of additive manufacturing," IISE Transactions, vol. 53, no. 11, pp. 1215-1230, 2021. +[92] J. Miao, H. Tao, H. Xie, J. Sun, and J. Cao, "Reconstruction-based anomaly detection for multivariate time series using contrastive generative adversarial networks," Inf. Process. Manag., vol. 61, no. 1, p. 103569, 2024. +[93] R. Liu, W. Liu, Z. Zheng, L. Wang, L. Mao, Q. Qiu, and G. Ling, "Anomaly-gan: A data augmentation method for train surface anomaly detection," Expert Syst. Appl., vol. 228, p. 120284, 2023. + +[94] Y. Li, Z. Shi, C. Liu, W. Tian, Z. Kong, and C. B. Williams, "Augmented time regularized generative adversarial network (atr-gan) for data augmentation in online process anomaly detection," IEEE Trans. Autom. Sci. Eng., vol. 19, no. 4, pp. 3338-3355, 2021. +[95] L. Zhang, W. Bai, X. Xie, L. Chen, and P. Dong, “Tmanomaly: Time-series mutual adversarial networks for industrial anomaly detection,” IEEE Trans. Ind. Inform., 2023. +[96] B. Du, X. Sun, J. Ye, K. Cheng, J. Wang, and L. Sun, "Gan-based anomaly detection for multivariate time series using polluted training set," IEEE Trans. Knowl. Data Eng., vol. 35, no. 12, pp. 
12 208-12 219, 2021. +[97] G. Fan, Y. Ma, X. Mei, F. Fan, J. Huang, and J. Ma, “Hyperspectral anomaly detection with robust graph autoencoders,” IEEE Trans. Geosci. Remote Sens., vol. 60, pp. 1-14, 2021. +[98] S. Wang, X. Wang, L. Zhang, and Y. Zhong, "Auto-ad: Autonomous hyperspectral anomaly detection network based on fully convolutional autoencoder," IEEE Trans. Geosci. Remote Sens., vol. 60, pp. 1-14, 2021. +[99] H. Liu, X. Su, X. Shen, and X. Zhou, "Msnet: Self-supervised multiscale network with enhanced separation training for hyperspectral anomaly detection," IEEE Trans. Geosci. Remote Sens., 2024. +[100] X. Lin, Z. Li, H. Fan, Y. Fu, and X. Chen, “Exploiting negative correlation for unsupervised anomaly detection in contaminated time series,” Expert Syst. Appl., p. 123535, 2024. +[101] C. Huang, Z. Yang, J. Wen, Y. Xu, Q. Jiang, J. Yang, and Y. Wang, "Self-supervision-augmented deep autoencoder for unsupervised visual anomaly detection," IEEE Trans. Cybern., vol. 52, no. 12, pp. 13834-13847, 2021. +[102] C. Yin, S. Zhang, J. Wang, and N. N. Xiong, "Anomaly detection based on convolutional recurrent autoencoder for IoT time series," IEEE Trans. Syst. Man Cybern.: Syst., vol. 52, no. 1, pp. 112-122, 2020. +[103] W. Zhang, C. Zhang, and F. Tsung, “Grelen: Multivariate time series anomaly detection from the perspective of graph relational learning,” in IJCAI, 2022, pp. 2390–2397. +[104] X. Zhou, Y. Hu, W. Liang, J. Ma, and Q. Jin, "Variational lstm enhanced anomaly detection for industrial big data," IEEE Trans. Ind. Inform., vol. 17, no. 5, pp. 3469-3477, 2020. +[105] A. Makhzani, J. Shlens, N. Jaitly, I. Goodfellow, and B. Frey, "Adversarial autoencoders," arXiv preprint arXiv:1511.05644, 2015. +[106] Q. Su, B. Tian, H. Wan, and J. Yin, "Anomaly detection under contaminated data with contamination-immune bidirectional gans," IEEE Trans. Knowl. Data Eng., 2024. +[107] J. Yu, X. Gao, F. Zhai, B. Li, B. Xue, S. Fu, L. Chen, and Z. Meng, "An adversarial contrastive autoencoder for robust multivariate time series anomaly detection," Expert Syst. Appl., vol. 245, p. 123010, 2024. +[108] J. Ho, A. Jain, and P. Abbeel, “Denoising diffusion probabilistic models,” Adv. Neural Inf. Process. Syst., vol. 33, pp. 6840–6851, 2020. +[109] J. Wolleb, F. Bieder, R. Sandkühler, and P. C. Cattin, "Diffusion models for medical anomaly detection," in Int. Conf. Med. Image Comput. Comput.-Assist. Interv. (MICCAI). Springer, 2022, pp. 35-45. +[110] X. Zhang, N. Li, J. Li, T. Dai, Y. Jiang, and S.-T. Xia, "Unsupervised surface anomaly detection with diffusion probabilistic model," in Proc. IEEE/CVF Int. Conf. Comput. Vis., 2023, pp. 6782-6791. +[111] S. Li, J. Yu, Y. Lu, G. Yang, X. Du, and S. Liu, "Self-supervised enhanced denoising diffusion for anomaly detection," Inf. Sci., vol. 669, p. 120612, 2024. +[112] J. Zeng, X. Liu, and Z. Li, "Radio anomaly detection based on improved denoising diffusion probabilistic models," IEEE Commun. Lett., 2023. +[113] X. Li, C. Xiao, Z. Feng, S. Pang, W. Tai, and F. Zhou, "Controlled graph neural networks with denoising diffusion for anomaly detection," Expert Syst. Appl., vol. 237, p. 121533, 2024. +[114] C. Li, G. Feng, Y. Li, R. Liu, Q. Miao, and L. Chang, “Diffstad: Denoising diffusion probabilistic models for vehicle trajectory anomaly detection,” Knowledge-Based Systems, vol. 286, p. 111387, 2024. +[115] J. Pei, J. Wang, D. Shi, and P. 
Wang, "Detection and imputation-based two-stage denoising diffusion power system measurement recovery under cyber-physical uncertainties," IEEE Trans. Smart Grid, vol. 15, no. 6, pp. 5965-5980, 2024. +[116] H. He, J. Zhang, H. Chen, X. Chen, Z. Li, X. Chen, Y. Wang, C. Wang, and L. Xie, "A diffusion-based framework for multi-class anomaly detection," in Proc. AAAI Conf. Artif. Intell., vol. 38, no. 8, 2024, pp. 8472-8480. +[117] A. Sherstinsky, “Fundamentals of recurrent neural network (rnn) and long short-term memory (lstm) network,” Physica D: Nonlinear Phenomena, vol. 404, p. 132306, 2020. + +[118] G. Van Houdt, C. Mosquera, and G. Nápoles, “A review on the long short-term memory model,” Artif. Intell. Rev., vol. 53, no. 8, pp. 5929–5955, 2020. +[119] B. Lindemann, B. Maschler, N. Sahlab, and M. Weyrich, “A survey on anomaly detection for technical systems using lstm networks,” Comput. Ind., vol. 131, p. 103498, 2021. +[120] R. Dey and F. M. Salem, “Gate-variants of gated recurrent unit (gru) neural networks,” in Proc. 2017 IEEE 60th Int. Midwest Symp. Circuits Syst. (MWSCAS). IEEE, 2017, pp. 1597–1600. +[121] Y. Wang, X. Du, Z. Lu, Q. Duan, and J. Wu, "Improved lstm-based time-series anomaly detection in rail transit operation environments," IEEE Trans. Ind. Inform., vol. 18, no. 12, pp. 9027-9036, 2022. +[122] H. Chen, H. Liu, X. Chu, Q. Liu, and D. Xue, "Anomaly detection and critical scada parameters identification for wind turbines based on lstm-ae neural network," Renew. Energy, vol. 172, pp. 829-840, 2021. +[123] P. Liu, X. Sun, Y. Han, Z. He, W. Zhang, and C. Wu, "Arrhythmia classification of lstm autoencoder based on time series anomaly detection," Biomed. Signal Process. Control, vol. 71, p. 103228, 2022. +[124] Y. Yao, J. Ma, S. Feng, and Y. Ye, "Svd-ae: An asymmetric autoencoder with svd regularization for multivariate time series anomaly detection," Neural Networks, vol. 170, pp. 535-547, 2024. +[125] S. Longari, D. H. N. Valcarcel, M. Zago, M. Carminati, and S. Zanero, "Cannolo: An anomaly detection system based on lstm autoencoders for controller area network," IEEE Trans. Netw. Serv. Manag., vol. 18, no. 2, pp. 1913-1924, 2020. +[126] J. Pei, J. Wang, and D. Shi, "Data-driven measurement tampering detection considering spatial-temporal correlations," in Proc. 2019 IEEE 3rd Conf. Energy Internet Energy Syst. Integr. (EI2), 2019, pp. 2641-2646. +[127] T. Lei, C. Gong, G. Chen, M. Ou, K. Yang, and J. Li, “A novel unsupervised framework for time series data anomaly detection via spectrum decomposition,” Knowledge-Based Systems, vol. 280, p. 111002, 2023. +[128] D. Hu, S. Wu, J. Wang, and D. Shi, "Training a dynamic neural network to detect false data injection attacks under multiple unforeseen operating conditions," IEEE Trans. Smart Grid, 2023. +[129] C. Tang, L. Xu, B. Yang, Y. Tang, and D. Zhao, “Gru-based interpretable multivariate time series anomaly detection in industrial control system,” Comput. Secur., vol. 127, p. 103094, 2023. +[130] J. Yu, X. Gao, B. Li, F. Zhai, J. Lu, B. Xue, S. Fu, and C. Xiao, "A filter-augmented auto-encoder with learnable normalization for robust multivariate time series anomaly detection," Neural Networks, vol. 170, pp. 478-493, 2024. +[131] A. Vaswani, "Attention is all you need," Adv. Neural Inf. Process. Syst., 2017. +[132] H. Kang and P. Kang, "Transformer-based multivariate time series anomaly detection using inter-variable attention mechanism," Knowledge-Based Systems, p. 111507, 2024. +[133] J. Kim, H. Kang, and P. 
Kang, “Time-series anomaly detection with stacked transformer representations and 1d convolutional network,” Eng. Appl. Artif. Intell., vol. 120, p. 105964, 2023. +[134] S. Tuli, G. Casale, and N. R. Jennings, “Tranad: Deep transformer networks for anomaly detection in multivariate time series data,” arXiv preprint arXiv:2201.07284, 2022. +[135] C. Wang and G. Liu, “From anomaly detection to classification with graph attention and transformer for multivariate time series,” Adv. Eng. Inform., vol. 60, p. 102357, 2024. +[136] J. Fan, Z. Wang, H. Wu, D. Sun, J. Wu, and X. Lu, "An adversarial time-frequency reconstruction network for unsupervised anomaly detection," Neural Networks, vol. 168, pp. 44-56, 2023. +[137] Y. Shi, B. Wang, Y. Yu, X. Tang, C. Huang, and J. Dong, "Robust anomaly detection for multivariate time series through temporal GCNs and attention-based vae," Knowledge-Based Systems, vol. 275, p. 110725, 2023. +[138] C. Ding, S. Sun, and J. Zhao, "Mst-gat: A multimodal spatial-temporal graph attention network for time series anomaly detection," Inf. Fusion, vol. 89, pp. 527-536, 2023. +[139] W. Zhu, W. Li, E. R. Dorsey, and J. Luo, "Unsupervised anomaly detection by densely contrastive learning for time series data," Neural Networks, vol. 168, pp. 450-458, 2023. +[140] H. Sun, M. Chen, J. Weng, Z. Liu, and G. Geng, "Anomaly detection for in-vehicle network using cnn-lstm with attention mechanism," IEEE Trans. Veh. Technol., vol. 70, no. 10, pp. 10880-10893, 2021. +[141] T. Le, H. C. Vu, A. Ponchet-Durupt, N. Boudaoud, Z. Cherfi-Boulanger, and T. Nguyen-Trang, "Unsupervised detecting anomalies in multivariate time series by robust convolutional LSTM encoder-decoder (rcled)," Neurocomputing, vol. 592, p. 127791, 2024. + +[142] M. Jin, H. Y. Koh, Q. Wen, D. Zambon, C. Alippi, G. I. Webb, I. King, and S. Pan, “A survey on graph neural networks for time series: Forecasting, classification, imputation, and anomaly detection,” IEEE Trans. Pattern Anal. Mach. Intell., 2024. +[143] Y. Wu, H.-N. Dai, and H. Tang, "Graph neural networks for anomaly detection in industrial internet of things," IEEE Internet Things J., vol. 9, no. 12, pp. 9214-9231, 2022. +[144] H. Kim, B. S. Lee, W.-Y. Shin, and S. Lim, “Graph anomaly detection with graph neural networks: Current status and challenges,” IEEE Access, vol. 10, pp. 111820-111829, 2022. +[145] A. Deng and B. Hooi, “Graph neural network-based anomaly detection in multivariate time series,” in Proc. AAAI Conf. Artif. Intell. (AAAI), vol. 35, no. 5, 2021, pp. 4027–4035. +[146] Z. Chen, D. Chen, X. Zhang, Z. Yuan, and X. Cheng, “Learning graph structures with transformer for multivariate time-series anomaly detection in IoT,” IEEE Internet Things J., vol. 9, no. 12, pp. 9179–9189, 2021. +[147] Y. Zheng, H. Y. Koh, M. Jin, L. Chi, K. T. Phan, S. Pan, Y.-P. P. Chen, and W. Xiang, "Correlation-aware spatial-temporal graph learning for multivariate time-series anomaly detection," IEEE Trans. Neural Netw. Learn. Syst., 2023. +[148] Y. Liu, Z. Li, S. Pan, C. Gong, C. Zhou, and G. Karypis, "Anomaly detection on attributed networks via contrastive self-supervised learning," IEEE Trans. Neural Netw. Learn. Syst., vol. 33, no. 6, pp. 2378-2392, 2022. +[149] H. Zhao, Y. Wang, J. Duan, C. Huang, D. Cao, Y. Tong, B. Xu, J. Bai, J. Tong, and Q. Zhang, "Multivariate time-series anomaly detection via graph attention network," in Proc. 2020 IEEE Int. Conf. Data Min. (ICDM)). IEEE, 2020, pp. 841-850. +[150] W. Chen, L. Tian, B. Chen, L. Dai, Z. Duan, and M. 
Zhou, "Deep variational graph convolutional recurrent network for multivariate time series anomaly detection," in Int. Conf. Mach. Learn. (ICML). PMLR, 2022, pp. 3621–3633.
[151] S. Han and S. S. Woo, "Learning sparse latent graph representations for anomaly detection in multivariate time series," in Proc. 28th ACM SIGKDD Conf. Knowl. Discov. Data Min. (KDD), 2022, pp. 2977-2986.
[152] Y. Tang, L. Zhao, S. Zhang, C. Gong, G. Li, and J. Yang, "Integrating prediction and reconstruction for anomaly detection," Pattern Recognit. Lett., vol. 129, pp. 123-130, 2020.
[153] M. Zheng, J. Man, D. Wang, Y. Chen, Q. Li, and Y. Liu, "Semisupervised multivariate time series anomaly detection for wind turbines using generator scada data," Reliab. Eng. Syst. Saf., vol. 235, p. 109235, 2023.
[154] Y. Wei, J. Jang-Jaccard, W. Xu, F. Sabrina, S. Camtepe, and M. Boulic, "Lstm-autoencoder-based anomaly detection for indoor air quality time-series data," IEEE Sens. J., vol. 23, no. 4, pp. 3787-3800, 2023.
[155] G. Pu, L. Wang, J. Shen, and F. Dong, "A hybrid unsupervised clustering-based anomaly detection method," Tsinghua Sci. Technol., vol. 26, no. 2, pp. 146–153, 2020.
[156] B. Liu, Y. Xiao, L. Cao, Z. Hao, and F. Deng, "Svdd-based outlier detection on uncertain data," Knowl. Inf. Syst., vol. 34, pp. 597-618, 2013.
[157] A. P. Muniyandi, R. Rajeswari, and R. Rajaram, "Network anomaly detection by cascading k-means clustering and C4.5 decision tree algorithm," Procedia Eng., vol. 30, pp. 174-182, 2012.
[158] A. M. Ikotun, A. E. Ezugwu, L. Abualigah, B. Abuhaija, and J. Heming, "K-means clustering algorithms: A comprehensive review, variants analysis, and advances in the era of big data," Inf. Sci., vol. 622, pp. 178-210, 2023.
[159] H. V. Singh, A. Girdhar, and S. Dahiya, "A literature survey based on dbscan algorithms," in Proc. 2022 6th Int. Conf. Intell. Comput. Control Syst. (ICICCS). IEEE, 2022, pp. 751-758.
[160] F. Murtagh and P. Contreras, "Algorithms for hierarchical clustering: an overview," Wiley Interdiscip. Rev. Data Min. Knowl. Discov., vol. 2, no. 1, pp. 86–97, 2012.
[161] J. Li, H. Izakian, W. Pedrycz, and I. Jamal, "Clustering-based anomaly detection in multivariate time series data," Appl. Soft Comput., vol. 100, p. 106919, 2021.
[162] A. Markovitz, G. Sharir, I. Friedman, L. Zelnik-Manor, and S. Avidan, "Graph embedded pose clustering for anomaly detection," in Proc. IEEE/CVF Conf. Comput. Vis. Pattern Recognit., 2020, pp. 10539-10547.
[163] S. Qiu, J. Ye, J. Zhao, L. He, L. Liu, E. Bicong, and X. Huang, "Video anomaly detection guided by clustering learning," Pattern Recognit., vol. 153, p. 110550, 2024.
[164] I. Kobyzev, S. J. Prince, and M. A. Brubaker, "Normalizing flows: An introduction and review of current methods," IEEE Trans. Pattern Anal. Mach. Intell., vol. 43, no. 11, pp. 3964–3979, 2020.
[165] J. Yu, Y. Zheng, X. Wang, W. Li, Y. Wu, R. Zhao, and L. Wu, "Fastflow: Unsupervised anomaly detection and localization via 2d normalizing flows," arXiv preprint arXiv:2111.07677, 2021.
[166] M. Cho, T. Kim, W. J. Kim, S. Cho, and S. Lee, "Unsupervised video anomaly detection via normalizing flows with implicit latent features," Pattern Recognit., vol. 129, p. 108703, 2022.
[167] Q. Zhou, S. He, H. Liu, J. Chen, and W. Meng, "Label-free multivariate time series anomaly detection," IEEE Trans. Knowl. Data Eng., 2024.
[168] E. Dai and J. Chen, "Graph-augmented normalizing flows for anomaly detection of multiple time series," arXiv preprint arXiv:2202.07857, 2022.
+[169] Y. Zhou, X. Liang, W. Zhang, L. Zhang, and X. Song, "Vae-based deep svdd for anomaly detection," Neurocomputing, vol. 453, pp. 131-140, 2021. +[170] Z. Zhang and X. Deng, "Anomaly detection using improved deep svdd model with data structure preservation," Pattern Recognit. Lett., vol. 148, pp. 1-6, 2021. +[171] J. Luo, J. Lin, Z. Yang, and H. Liu, "Smd anomaly detection: A self-supervised texture-structure anomaly detection framework," IEEE Trans. Instrum. Meas., vol. 71, pp. 1-11, 2022. +[172] C.-L. Li, K. Sohn, J. Yoon, and T. Pfister, "Cutpaste: Self-supervised learning for anomaly detection and localization," in Proc. IEEE/CVF Conf. Comput. Vis. Pattern Recognit., 2021, pp. 9664-9674. +[173] R. Liu, W. Liu, Z. Zheng, L. Wang, L. Mao, Q. Qiu, and G. Ling, "Anomaly-gan: A data augmentation method for train surface anomaly detection," Expert Syst. Appl., vol. 228, p. 120284, 2023. +[174] Q. Wen, L. Sun, F. Yang, X. Song, J. Gao, X. Wang, and H. Xu, "Time series data augmentation for deep learning: A survey," arXiv preprint arXiv:2002.12478, 2020. +[175] H. Hojjati, T. K. K. Ho, and N. Armanfard, "Self-supervised anomaly detection in computer vision and beyond: A survey and outlook," Neural Networks, vol. 172, p. 106106, 2024. +[176] X. Zhang, M. Xu, and X. Zhou, “Realnet: A feature selection network with realistic synthetic anomaly for anomaly detection,” in Proc. IEEE/CVF Conf. Comput. Vis. Pattern Recognit., 2024, pp. 16699–16708. +[177] F. Van Wyk, Y. Wang, A. Khojandi, and N. Masoud, “Real-time sensor anomaly detection and identification in automated vehicles,” IEEE Trans. Intell. Transp. Syst., vol. 21, no. 3, pp. 1264–1276, 2019. +[178] M. Abouof, R. Mizouni, S. Singh, H. Otrok, and E. Damiani, "Self-supervised online and lightweight anomaly and event detection for IoT devices," IEEE Internet Things J, vol. 9, no. 24, pp. 25 285-25 299, 2022. +[179] X. Zhou, J. Wu, W. Liang, I. Kevin, K. Wang, Z. Yan, L. T. Yang, and Q. Jin, "Reconstructed graph neural network with knowledge distillation for lightweight anomaly detection," IEEE Trans. Neural Netw. Learn. Syst., 2024. +[180] Y. Zhao, G. H. Chen, and Z. Jia, “Tod: GPU-accelerated outlier detection via tensor operations,” arXiv preprint arXiv:2110.14007, 2021. +[181] A. Al-Mazrawe and B. Al-Musawi, “Anomaly detection in cloud network: A review,” in BIO Web of Conferences, vol. 97. EDP Sciences, 2024, p. 00019. +[182] Z. Niu, G. Zhong, and H. Yu, “A review on the attention mechanism of deep learning,” Neurocomputing, vol. 452, pp. 48–62, 2021. +[183] H. Liu, X. Huang, M. Jia, T. Jia, J. Han, Y. Li, and Z. Wu, "Uac-ad: Unsupervised adversarial contrastive learning for anomaly detection on multi-modal data in microservice systems," IEEE Trans. Serv. Comput., 2024. 
\ No newline at end of file diff --git a/data/2025/2503_13xxx/2503.13195/images/019b206e929dfe093eeec77a1d42acc96182676e7e7c79b8a51a4f5bc4ca29c2.jpg b/data/2025/2503_13xxx/2503.13195/images/019b206e929dfe093eeec77a1d42acc96182676e7e7c79b8a51a4f5bc4ca29c2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2bfd1b283920c2cb82c5f74011af5215c2946144 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13195/images/019b206e929dfe093eeec77a1d42acc96182676e7e7c79b8a51a4f5bc4ca29c2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4fdeeb1ae826d39395f3f86aeecfa50f370016ffff02bba91fe82018592dd84e +size 41540 diff --git a/data/2025/2503_13xxx/2503.13195/images/01a5c0345962b2f97cd0ef64d890f1c98d4af1b99d38c5a98823b19ce41cfe61.jpg b/data/2025/2503_13xxx/2503.13195/images/01a5c0345962b2f97cd0ef64d890f1c98d4af1b99d38c5a98823b19ce41cfe61.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d0b27a2b1f332de81d19b2fdd99d4c84110762dd --- /dev/null +++ b/data/2025/2503_13xxx/2503.13195/images/01a5c0345962b2f97cd0ef64d890f1c98d4af1b99d38c5a98823b19ce41cfe61.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a99996ecfeb3f336fbe5d365ca46ec840e04401bb86c80fd5cdbfd634a5c4525 +size 131874 diff --git a/data/2025/2503_13xxx/2503.13195/images/0582fc6497eafabed1bd3451b1290a6b09ece498ffa50d1156dd7f0120e507ef.jpg b/data/2025/2503_13xxx/2503.13195/images/0582fc6497eafabed1bd3451b1290a6b09ece498ffa50d1156dd7f0120e507ef.jpg new file mode 100644 index 0000000000000000000000000000000000000000..859ecc4a6132c25858c66f79b98a000785adf5e4 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13195/images/0582fc6497eafabed1bd3451b1290a6b09ece498ffa50d1156dd7f0120e507ef.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ff2e767c170d1f52ea7bcf75e40547ddbebdc4418be5ec356fe661f8c1ed0a0f +size 13916 diff --git a/data/2025/2503_13xxx/2503.13195/images/09194629b0767868e7df5246fea6e4a2179c838fa9818a8cc9e274fc26ee5ae9.jpg b/data/2025/2503_13xxx/2503.13195/images/09194629b0767868e7df5246fea6e4a2179c838fa9818a8cc9e274fc26ee5ae9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ede6b7b7c3b9c18954ab3a60ab7dde12f658f279 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13195/images/09194629b0767868e7df5246fea6e4a2179c838fa9818a8cc9e274fc26ee5ae9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d13fede354a8044f3f238fb566eae2d54d5646ff84f79ba0f7f6af8b217d980e +size 81484 diff --git a/data/2025/2503_13xxx/2503.13195/images/0a1dd839ad5ce27d8ca8ac3dde9e2c28a00c69f076a04ee08e2228fd8db6d21b.jpg b/data/2025/2503_13xxx/2503.13195/images/0a1dd839ad5ce27d8ca8ac3dde9e2c28a00c69f076a04ee08e2228fd8db6d21b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2d9d285f2d104b9dc3a77271b8943aab128c601d --- /dev/null +++ b/data/2025/2503_13xxx/2503.13195/images/0a1dd839ad5ce27d8ca8ac3dde9e2c28a00c69f076a04ee08e2228fd8db6d21b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:54bfe64b6c91c69887b6a3c70f5ed94a5332af7081cb9a35927558fb70860051 +size 273504 diff --git a/data/2025/2503_13xxx/2503.13195/images/0c83e3768363b3393233d77288befd0f35cb7bcb4b9c89a9f8ee4f51a0f96f26.jpg b/data/2025/2503_13xxx/2503.13195/images/0c83e3768363b3393233d77288befd0f35cb7bcb4b9c89a9f8ee4f51a0f96f26.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a8497d663f6b28858a776aae1b5263f30f3b875e --- /dev/null +++ 
b/data/2025/2503_13xxx/2503.13195/images/0c83e3768363b3393233d77288befd0f35cb7bcb4b9c89a9f8ee4f51a0f96f26.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3cda2c4344b272780bcaf0f417bd6e28dea4c1fb178d2a5bb92daafea392b3ca +size 4473 diff --git a/data/2025/2503_13xxx/2503.13195/images/11c660ad378396e7d5a12ad6c18a5c71bc6887df1411ff0efc0d981bd187e2f7.jpg b/data/2025/2503_13xxx/2503.13195/images/11c660ad378396e7d5a12ad6c18a5c71bc6887df1411ff0efc0d981bd187e2f7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b1662b6958b6d16ae063e39796549aca4b7825b7 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13195/images/11c660ad378396e7d5a12ad6c18a5c71bc6887df1411ff0efc0d981bd187e2f7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:958f07c723cdf43b40462dc66443145f4069a567961af88fa9b060f6096c3cda +size 289514 diff --git a/data/2025/2503_13xxx/2503.13195/images/2a7211cd592f142299cc19606c6bbd41196006c6e92189f745f7644a0f048d6f.jpg b/data/2025/2503_13xxx/2503.13195/images/2a7211cd592f142299cc19606c6bbd41196006c6e92189f745f7644a0f048d6f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5d126acc9b4f39cfb75524b225710b1219108d33 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13195/images/2a7211cd592f142299cc19606c6bbd41196006c6e92189f745f7644a0f048d6f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b5060819f73f20bfc346b3cce3dc77eef51ebdfb2b34b5cb47d2e0b953f36cdd +size 2615 diff --git a/data/2025/2503_13xxx/2503.13195/images/3330583da44f04aadda892ec09bf36e2ea653e3123da2117b4ea223ff767ce02.jpg b/data/2025/2503_13xxx/2503.13195/images/3330583da44f04aadda892ec09bf36e2ea653e3123da2117b4ea223ff767ce02.jpg new file mode 100644 index 0000000000000000000000000000000000000000..830fcfdfe74decf5f9f2c89e7092863d4099276e --- /dev/null +++ b/data/2025/2503_13xxx/2503.13195/images/3330583da44f04aadda892ec09bf36e2ea653e3123da2117b4ea223ff767ce02.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1edd65aa953d39797cc7202f97eebfce266efdb07adc60939c683a6117ef1e38 +size 50574 diff --git a/data/2025/2503_13xxx/2503.13195/images/3aab2761cdd89b226e195041dbb9899bba4c9001b81bef2aec2bb5ff3066b14b.jpg b/data/2025/2503_13xxx/2503.13195/images/3aab2761cdd89b226e195041dbb9899bba4c9001b81bef2aec2bb5ff3066b14b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c98eee31432c9e3220697bef85a01be833e38b99 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13195/images/3aab2761cdd89b226e195041dbb9899bba4c9001b81bef2aec2bb5ff3066b14b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f4277b787187a5aaee81593376519dd1554bdb24e6ddb95433b977020c0c2307 +size 4378 diff --git a/data/2025/2503_13xxx/2503.13195/images/3be6049775f5c5191948d715f5086fae3a8775358da3241c119448a1483ffcc1.jpg b/data/2025/2503_13xxx/2503.13195/images/3be6049775f5c5191948d715f5086fae3a8775358da3241c119448a1483ffcc1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fc933a8eea9501f7339286c92cdb2d1d5607e64a --- /dev/null +++ b/data/2025/2503_13xxx/2503.13195/images/3be6049775f5c5191948d715f5086fae3a8775358da3241c119448a1483ffcc1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:314e6a47da5633c76eb094d7ed322d8bb8752c6f6380db63152fb399b7569ba2 +size 4230 diff --git a/data/2025/2503_13xxx/2503.13195/images/599f2b99fd60660c0b7aed4125907dd6087f566ef9745745c026c30544caff37.jpg 
b/data/2025/2503_13xxx/2503.13195/images/599f2b99fd60660c0b7aed4125907dd6087f566ef9745745c026c30544caff37.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b05c7fd84ba44526d026a53db356bb93b51c1866 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13195/images/599f2b99fd60660c0b7aed4125907dd6087f566ef9745745c026c30544caff37.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7d9c3f9b66a18263ea4e91781dbc89b860e3e581cfa9e0268c37229cbef247cd +size 5404 diff --git a/data/2025/2503_13xxx/2503.13195/images/6daf740dc031ec066115a38562e4dd3bb6bddacf671df3dc996ddae37458be6a.jpg b/data/2025/2503_13xxx/2503.13195/images/6daf740dc031ec066115a38562e4dd3bb6bddacf671df3dc996ddae37458be6a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3ab6574ae126588b808029c00224c7dfac5423af --- /dev/null +++ b/data/2025/2503_13xxx/2503.13195/images/6daf740dc031ec066115a38562e4dd3bb6bddacf671df3dc996ddae37458be6a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cf6a3a795163985b0b6f7f37e42df1264ff1f4bf7f079883343a4f2e4b83e4a3 +size 6762 diff --git a/data/2025/2503_13xxx/2503.13195/images/7308349784488dbf457c989800ee8b1dfb8275a27619297e48677fe456758a50.jpg b/data/2025/2503_13xxx/2503.13195/images/7308349784488dbf457c989800ee8b1dfb8275a27619297e48677fe456758a50.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5b9880114061973467877c6233b2382532a0a46c --- /dev/null +++ b/data/2025/2503_13xxx/2503.13195/images/7308349784488dbf457c989800ee8b1dfb8275a27619297e48677fe456758a50.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:23ef77bfdc213a93824d2b1f1793219e8be3d521b84d9a5c93d6527280cff763 +size 5356 diff --git a/data/2025/2503_13xxx/2503.13195/images/7509a938836ca50d09aa01364c29c0e376376912d0ca6818c1dc0151a063308a.jpg b/data/2025/2503_13xxx/2503.13195/images/7509a938836ca50d09aa01364c29c0e376376912d0ca6818c1dc0151a063308a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..42f829b8c6e482947ae0644b84cb5e4d5095bec8 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13195/images/7509a938836ca50d09aa01364c29c0e376376912d0ca6818c1dc0151a063308a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1f415f6865f48be334463d830aade3a5d9901bd701becfcbeb5a793ea7ac3bde +size 41937 diff --git a/data/2025/2503_13xxx/2503.13195/images/776e31311c1c4c6521d678b5659cbccf97515cff7d507f59b7a11d83bd281e30.jpg b/data/2025/2503_13xxx/2503.13195/images/776e31311c1c4c6521d678b5659cbccf97515cff7d507f59b7a11d83bd281e30.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e9203cf9a19e31bb749ebd81b54ef805a32c80fe --- /dev/null +++ b/data/2025/2503_13xxx/2503.13195/images/776e31311c1c4c6521d678b5659cbccf97515cff7d507f59b7a11d83bd281e30.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5075f7ad5eea36006dacc7ba5dd5467b6e8095234e18c7c51d01c169ee77fb32 +size 72758 diff --git a/data/2025/2503_13xxx/2503.13195/images/7a3d189f7e4d2c78f15db015f6396740178b4f4fadb8853fe3a70a7dbd4e03f5.jpg b/data/2025/2503_13xxx/2503.13195/images/7a3d189f7e4d2c78f15db015f6396740178b4f4fadb8853fe3a70a7dbd4e03f5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..315136f616cb50238edd07a624790bf5a78c1c87 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13195/images/7a3d189f7e4d2c78f15db015f6396740178b4f4fadb8853fe3a70a7dbd4e03f5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:e364ca36abee9095ed36b6da266c7485044ea420b087aa101464185cf73fd17a +size 2453 diff --git a/data/2025/2503_13xxx/2503.13195/images/82d18c91a06f87e742d55753cfa9e1a8bc54048a8c709dd6c0549a643b62acec.jpg b/data/2025/2503_13xxx/2503.13195/images/82d18c91a06f87e742d55753cfa9e1a8bc54048a8c709dd6c0549a643b62acec.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3c40e7bc61ffe5a2599c355a3975c03d2ad5b07b --- /dev/null +++ b/data/2025/2503_13xxx/2503.13195/images/82d18c91a06f87e742d55753cfa9e1a8bc54048a8c709dd6c0549a643b62acec.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ea83315c4dd720358b0fd00db97b6947c8bcffe13f53e294b58ad1ae854608db +size 7611 diff --git a/data/2025/2503_13xxx/2503.13195/images/833120e573f186fcf6c195b1480a6112e6e1f60a22837b1a5310d21419b89868.jpg b/data/2025/2503_13xxx/2503.13195/images/833120e573f186fcf6c195b1480a6112e6e1f60a22837b1a5310d21419b89868.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f71c0838ea9f83ee8e32d96679db6a64efebaa16 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13195/images/833120e573f186fcf6c195b1480a6112e6e1f60a22837b1a5310d21419b89868.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fd0faff92c26e30ac5f9de4d8d66211acecd4fe280392527d0923ae923de5a80 +size 10164 diff --git a/data/2025/2503_13xxx/2503.13195/images/880d55c7ec7dc69d5f5d9f1f2b32b3c2153e316b56c5560359f5852c0d17b74d.jpg b/data/2025/2503_13xxx/2503.13195/images/880d55c7ec7dc69d5f5d9f1f2b32b3c2153e316b56c5560359f5852c0d17b74d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4594ab577c01cb79407f5d1b3e55d8a886336919 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13195/images/880d55c7ec7dc69d5f5d9f1f2b32b3c2153e316b56c5560359f5852c0d17b74d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d46db19bbf737327ca95fea9918ea578d488f2b824de8b55f41d7b3d38c6cbb9 +size 12978 diff --git a/data/2025/2503_13xxx/2503.13195/images/956f8a63368e4835961a7080cbc90098d7844aea31a82a4bd78c0123a0e5a997.jpg b/data/2025/2503_13xxx/2503.13195/images/956f8a63368e4835961a7080cbc90098d7844aea31a82a4bd78c0123a0e5a997.jpg new file mode 100644 index 0000000000000000000000000000000000000000..16b227ea7ca90510bf9aa1634c917ca84d107440 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13195/images/956f8a63368e4835961a7080cbc90098d7844aea31a82a4bd78c0123a0e5a997.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7d21647be4c6357b20dbf2fff55bed684c265dbba772e38a2bdfac2d474530ca +size 5642 diff --git a/data/2025/2503_13xxx/2503.13195/images/99343b86e303b16ed72ac102346e1a4f94400e20ddc1bebcd295923286e32817.jpg b/data/2025/2503_13xxx/2503.13195/images/99343b86e303b16ed72ac102346e1a4f94400e20ddc1bebcd295923286e32817.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9e635f0ef9e71bc2fb2156ee1bfb7aeeb83cbb00 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13195/images/99343b86e303b16ed72ac102346e1a4f94400e20ddc1bebcd295923286e32817.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6306600aeca923e8f481388713f21468cb44294d7c9e24666912e66c7198c474 +size 177982 diff --git a/data/2025/2503_13xxx/2503.13195/images/9ab5864bb5a00f300f042438425cf0cfc8335ed7c480d2804fa41efc69b98043.jpg b/data/2025/2503_13xxx/2503.13195/images/9ab5864bb5a00f300f042438425cf0cfc8335ed7c480d2804fa41efc69b98043.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d0df20691dae722ba0a9ae466cad20528d4afc2c --- /dev/null +++ 
b/data/2025/2503_13xxx/2503.13195/images/9ab5864bb5a00f300f042438425cf0cfc8335ed7c480d2804fa41efc69b98043.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b460be22e607fdf6b205229ee7202fef845f0ca1c72a65423826c36c1a32e4bd +size 4159 diff --git a/data/2025/2503_13xxx/2503.13195/images/a55eb6817213979a54e6dc36acf538fe4e0b2f0ec1f3c9f8bb2d32bba446d192.jpg b/data/2025/2503_13xxx/2503.13195/images/a55eb6817213979a54e6dc36acf538fe4e0b2f0ec1f3c9f8bb2d32bba446d192.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6abb92e1a1da04da2c0487ae76144559a5739ff8 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13195/images/a55eb6817213979a54e6dc36acf538fe4e0b2f0ec1f3c9f8bb2d32bba446d192.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cc209809f51b23736719ed8a4dffae1a8ed26f5e1d6c72d2dbfc55586b8200c1 +size 3461 diff --git a/data/2025/2503_13xxx/2503.13195/images/ab7aedd616ec4505c3fc87c94f267fe1e3ff25684d846743f04d6cdec18a9037.jpg b/data/2025/2503_13xxx/2503.13195/images/ab7aedd616ec4505c3fc87c94f267fe1e3ff25684d846743f04d6cdec18a9037.jpg new file mode 100644 index 0000000000000000000000000000000000000000..04c8cc8a9ad9a40e6bcf01c7e73cd7a50702791e --- /dev/null +++ b/data/2025/2503_13xxx/2503.13195/images/ab7aedd616ec4505c3fc87c94f267fe1e3ff25684d846743f04d6cdec18a9037.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d9dbd939b5a93f885c58bd75bbe406c5466574f7b9d06a860dca69e468e9830e +size 18136 diff --git a/data/2025/2503_13xxx/2503.13195/images/ad8391976e6f89cfbc05f96fe208d27610a19c9217688d1a8d523a50dd83d456.jpg b/data/2025/2503_13xxx/2503.13195/images/ad8391976e6f89cfbc05f96fe208d27610a19c9217688d1a8d523a50dd83d456.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8f02d45bf7ee0fe77d94b0a791b7ba82e2bd96be --- /dev/null +++ b/data/2025/2503_13xxx/2503.13195/images/ad8391976e6f89cfbc05f96fe208d27610a19c9217688d1a8d523a50dd83d456.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dfb8ce880e370f0776f01e838ce557e15385ba1f55e7d0c9d543d111128cfe16 +size 5976 diff --git a/data/2025/2503_13xxx/2503.13195/images/c152336502e48e3f718bf9788652d8fed7e7c775782ddbca7740d63108b86f8a.jpg b/data/2025/2503_13xxx/2503.13195/images/c152336502e48e3f718bf9788652d8fed7e7c775782ddbca7740d63108b86f8a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3806117ef969f719d206476c752704d83b8ba650 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13195/images/c152336502e48e3f718bf9788652d8fed7e7c775782ddbca7740d63108b86f8a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:38554de4b50729c3dad9de2fdda15663c8fa1dfb802722055fdd2d4e0ba4d136 +size 8565 diff --git a/data/2025/2503_13xxx/2503.13195/images/d588d6f350589007811fb9b7c0e386db40420189cc2b21050b623b552529bf5c.jpg b/data/2025/2503_13xxx/2503.13195/images/d588d6f350589007811fb9b7c0e386db40420189cc2b21050b623b552529bf5c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1d78ca8691b47041188077b1a28e4ec55272e016 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13195/images/d588d6f350589007811fb9b7c0e386db40420189cc2b21050b623b552529bf5c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c9472d1c23c2dfc472568d0a5effa1860e4e8830b2926a692d59193a29badd34 +size 3587 diff --git a/data/2025/2503_13xxx/2503.13195/images/e1d772cb17cdcc7baea708f3a4c2d5a2999a5ea4551a1a3a2e14e9eef439af94.jpg 
b/data/2025/2503_13xxx/2503.13195/images/e1d772cb17cdcc7baea708f3a4c2d5a2999a5ea4551a1a3a2e14e9eef439af94.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6f08fcd3cd09598fef7a2231173f5e3eff201c40 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13195/images/e1d772cb17cdcc7baea708f3a4c2d5a2999a5ea4551a1a3a2e14e9eef439af94.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ab0b84aecabb1e8cbea7fd06d5ea9634974556a7c10810b805ace1b84ff441f8 +size 7296 diff --git a/data/2025/2503_13xxx/2503.13195/images/fd57f4067bb6f3c6e9b25b5ccfc25170dec2d8afbbeb522aa5c35f6a09e5a7e3.jpg b/data/2025/2503_13xxx/2503.13195/images/fd57f4067bb6f3c6e9b25b5ccfc25170dec2d8afbbeb522aa5c35f6a09e5a7e3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e03c94f3977fafca7de94187518ff68c673cb044 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13195/images/fd57f4067bb6f3c6e9b25b5ccfc25170dec2d8afbbeb522aa5c35f6a09e5a7e3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f86d1501228ad57007392c04fb8a82d9b144d27c62637291228a5032952b6bbf +size 21095 diff --git a/data/2025/2503_13xxx/2503.13195/layout.json b/data/2025/2503_13xxx/2503.13195/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..0f95a3c5c05a49d07e943254f296deba125a66c2 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13195/layout.json @@ -0,0 +1,17635 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 94, + 56, + 515, + 111 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 56, + 515, + 111 + ], + "spans": [ + { + "bbox": [ + 94, + 56, + 515, + 111 + ], + "type": "text", + "content": "Deep Learning Advancements in Anomaly Detection: A Comprehensive Survey" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 117, + 553, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 117, + 553, + 144 + ], + "spans": [ + { + "bbox": [ + 56, + 117, + 553, + 144 + ], + "type": "text", + "content": "Haoqi Huang, Ping Wang" + }, + { + "bbox": [ + 56, + 117, + 553, + 144 + ], + "type": "inline_equation", + "content": "\\text{©}" + }, + { + "bbox": [ + 56, + 117, + 553, + 144 + ], + "type": "text", + "content": ", Fellow, IEEE, Jianhua Pei" + }, + { + "bbox": [ + 56, + 117, + 553, + 144 + ], + "type": "inline_equation", + "content": "\\text{©}" + }, + { + "bbox": [ + 56, + 117, + 553, + 144 + ], + "type": "text", + "content": ", Graduate Student Member, IEEE, Jiacheng Wang" + }, + { + "bbox": [ + 56, + 117, + 553, + 144 + ], + "type": "inline_equation", + "content": "\\text{©}" + }, + { + "bbox": [ + 56, + 117, + 553, + 144 + ], + "type": "text", + "content": ", Shahren Alexanian, and Dusit Niyato" + }, + { + "bbox": [ + 56, + 117, + 553, + 144 + ], + "type": "inline_equation", + "content": "\\text{©}" + }, + { + "bbox": [ + 56, + 117, + 553, + 144 + ], + "type": "text", + "content": ", Fellow, IEEE" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 45, + 186, + 301, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 186, + 301, + 407 + ], + "spans": [ + { + "bbox": [ + 45, + 186, + 301, + 407 + ], + "type": "text", + "content": "Abstract—The rapid expansion of data from diverse sources has made anomaly detection (AD) increasingly essential for identifying unexpected observations that may signal system failures, security breaches, or fraud. As datasets become more complex and high-dimensional, traditional detection methods struggle to effectively capture intricate patterns. 
Advances in deep learning have made AD methods more powerful and adaptable, improving their ability to handle high-dimensional and unstructured data. This survey provides a comprehensive review of over 180 recent studies, focusing on deep learning-based AD techniques. We categorize and analyze these methods into reconstruction-based and prediction-based approaches, highlighting their effectiveness in modeling complex data distributions. Additionally, we explore the integration of traditional and deep learning methods, highlighting how hybrid approaches combine the interpretability of traditional techniques with the flexibility of deep learning to enhance detection accuracy and model transparency. Finally, we identify open issues and propose future research directions to advance the field of AD. This review bridges gaps in existing literature and serves as a valuable resource for researchers and practitioners seeking to enhance AD techniques using deep learning." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 45, + 412, + 301, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 412, + 301, + 443 + ], + "spans": [ + { + "bbox": [ + 45, + 412, + 301, + 443 + ], + "type": "text", + "content": "Index Terms—Anomaly detection, deep learning, data reconstruction and prediction, Internet of things, comprehensive survey." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 462, + 215, + 473 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 462, + 215, + 473 + ], + "spans": [ + { + "bbox": [ + 132, + 462, + 215, + 473 + ], + "type": "text", + "content": "I. INTRODUCTION" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 45, + 479, + 300, + 598 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 479, + 300, + 598 + ], + "spans": [ + { + "bbox": [ + 45, + 479, + 300, + 598 + ], + "type": "text", + "content": "An anomaly refers to an observation that significantly deviates from the expected behavior in a system, often appearing unusual, inconsistent, or unexpected [1]. Despite the fact that outliers typically constitute only a small fraction of a dataset, they are often highly crucial because they carry important information and can reveal critical insights during analysis. Consequently, anomaly detection (AD) is the process of identifying such anomalous observations using various methods and algorithms, which aids decision-makers in better understanding data patterns and behaviors." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 45, + 599, + 301, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 599, + 301, + 647 + ], + "spans": [ + { + "bbox": [ + 45, + 599, + 301, + 647 + ], + "type": "text", + "content": "The rapid development of the Internet of Things (IoT) has revolutionized the way data is generated, collected, and analyzed across various domains. IoT systems leverage a wide array of interconnected sensors and devices to collect massive" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 45, + 657, + 301, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 657, + 301, + 748 + ], + "spans": [ + { + "bbox": [ + 45, + 657, + 301, + 748 + ], + "type": "text", + "content": "H. Huang, P. Wang and S. Alexanian are with the Lassonde School of Engineering, York University, Toronto, ON M3J 1P3, Canada (e-mail:joycehhq@yorku.ca; pingw@yorku.ca; yu263319@my.yorku.ca). J. 
Pei is with the State Key Laboratory of Advanced Electromagnetic Technology, School of Electrical and Electronic Engineering, Huazhong University of Science and Technology, Wuhan 430074, China (e-mail: jianhuapei@hust.edu.cn). J. Wang and D. Niyato are with the School of Computer Science and Engineering, Nanyang Technological University, Singapore (e-mail: jcwang_cq@foxmail.com; dniyato@ntu.edu.sg)." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 307, + 186, + 564, + 305 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 186, + 564, + 305 + ], + "spans": [ + { + "bbox": [ + 307, + 186, + 564, + 305 + ], + "type": "text", + "content": "amounts of real-time data in diverse applications, including smart cities [2], industrial automation [3], healthcare [4], and transportation [5], etc. This proliferation of sensor data introduces unprecedented opportunities for enhancing operational efficiency and decision-making processes. However, it also presents significant challenges, as the data is often high-dimensional, noisy, and prone to anomalies caused by faulty sensors, environmental changes, or malicious attacks [6]. Detecting anomalies in data is critical for ensuring system reliability, security, and performance [7]." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 307, + 306, + 564, + 735 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 306, + 564, + 735 + ], + "spans": [ + { + "bbox": [ + 307, + 306, + 564, + 735 + ], + "type": "text", + "content": "AD methodologies can be systematically classified according to various criteria. One prominent classification framework differentiates these methods into supervised, semi-supervised, and unsupervised approaches, predicated on the availability and nature of labeled data [8]. Supervised learning-based AD algorithms necessitate a fully labeled dataset, where each data point is explicitly annotated as either normal or anomalous. This labeling process facilitates the model's ability to discern and learn the underlying characteristics that differentiate anomalous instances from normal ones, thereby enhancing its detection accuracy. Semi-supervised learning-based methods, on the other hand, operate with a dataset comprising a substantial volume of unlabeled data alongside a smaller subset of labeled instances. These labels may include both normal and anomalous data, or in certain cases, solely normal instances [9]. In scenarios where only normal data is labeled, the semi-supervised approach converges towards unsupervised methodologies, as the model predominantly learns normal behavior patterns and identifies anomalies as deviations from these learned patterns. Unsupervised learning-based AD methods eschew the need for labeled data entirely, leveraging the intrinsic structural properties of the dataset to autonomously identify anomalies [10] [11]. In practical applications, a significant portion of contemporary AD research gravitates towards unsupervised methods [12]. This preference is largely driven by the substantial imbalance between the number of normal instances and anomalies, which complicates the acquisition of a sufficiently large labeled dataset required for effective supervised learning [13]. Moreover, anomalies are frequently correlated with critical failures or hazardous events, rendering the labeling process both costly and logistically challenging. 
Another key classification criterion is the nature of the dataset, particularly whether it comprises time-series data, which distinguishes AD methods into time-series [14] and non-temporal approaches. The applications of time-series and non-temporal AD will be discussed in detail in Section III." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 318, + 736, + 564, + 749 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 736, + 564, + 749 + ], + "spans": [ + { + "bbox": [ + 318, + 736, + 564, + 749 + ], + "type": "text", + "content": "In addition to the temporal aspect, AD techniques can also" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 47, + 23, + 261, + 32 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 23, + 261, + 32 + ], + "spans": [ + { + "bbox": [ + 47, + 23, + 261, + 32 + ], + "type": "text", + "content": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. X, XXX 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 14, + 206, + 35, + 559 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 206, + 35, + 559 + ], + "spans": [ + { + "bbox": [ + 14, + 206, + 35, + 559 + ], + "type": "text", + "content": "arXiv:2503.13195v1 [cs.LG] 17 Mar 2025" + } + ] + } + ], + "index": 2 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 45, + 55, + 301, + 355 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 55, + 301, + 355 + ], + "spans": [ + { + "bbox": [ + 45, + 55, + 301, + 355 + ], + "type": "text", + "content": "be categorized based on their underlying paradigms: traditional methods and deep learning-based methods. Traditional techniques encompass statistical approaches [15], distance-based methods [16], and clustering algorithms [17]. These approaches generally rely on estimating the probability distribution of normal data to predict anomalies. However, since the early 20th century, the fields of data science, machine learning, deep learning, and artificial intelligence have witnessed exponential growth, with significant implications for AD [18]. Particularly in recent years, the advent of soft-computing techniques has significantly influenced the development of deep learning-based methods. These techniques are characterized by their ability to handle imprecise, uncertain, and nonlinear data, making them highly suitable for applications involving deep learning. Consequently, deep learning-based methods have been propelled to the forefront due to their superior capability to learn expressive representations of complex data, including high-dimensional, temporal, spatial, and graph-structured data [19]. By proficiently modeling intricate patterns and relationships inherent in the data, deep learning approaches have proven remarkably effective in identifying anomalies across a wide range of challenging and complex datasets. This paper concentrates specifically on AD methods based on deep learning models, with the objective of providing a comprehensive review of this rapidly evolving field." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 45, + 363, + 299, + 386 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 363, + 299, + 386 + ], + "spans": [ + { + "bbox": [ + 45, + 363, + 299, + 386 + ], + "type": "text", + "content": "A. Contrasting Traditional Models with Deep Learning Models" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 390, + 301, + 725 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 390, + 301, + 725 + ], + "spans": [ + { + "bbox": [ + 45, + 390, + 301, + 725 + ], + "type": "text", + "content": "Traditional AD methods [20], such as statistical techniques, clustering algorithms [21], and Principal Component Analysis (PCA) [22], have long been established as reliable tools across a wide spectrum of applications due to their simplicity, interpretability, and low computational overhead. These characteristics make them particularly promising in scenarios where model transparency and efficiency are paramount. Statistical techniques, for example, provide clear, rule-based mechanisms for detecting anomalies, while clustering algorithms are effective in grouping similar data points and isolating outliers in relatively low-dimensional datasets. Similarly, PCA has been widely adopted for dimensionality reduction, enabling effective AD by isolating principal components that capture major variations in the data [17]. Despite these advantages, traditional methods often encounter significant limitations when applied to modern, complex datasets. Statistical techniques generally assume that data adheres to specific distributions. However, this assumption is rarely met in real-world scenarios, where data often exhibits non-Gaussian distributions and heavy tails. Clustering-based methods, while useful in many contexts, check to accurately define clusters, particularly when anomalies do not present clear separability from normal data. PCA, on the other hand, relies heavily on the assumption of linearity and extensive feature engineering, making it less effective at capturing the nuanced, non-linear patterns prevalent in high-dimensional datasets [22]. These constraints have prompted a shift towards more advanced approaches capable of handling the increasing complexity of modern data." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 45, + 724, + 301, + 749 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 724, + 301, + 749 + ], + "spans": [ + { + "bbox": [ + 45, + 724, + 301, + 749 + ], + "type": "text", + "content": "In contrast, deep learning models have recently emerged as a powerful alternative, addressing many of the shortcom" + } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 313, + 53, + 557, + 547 + ], + "blocks": [ + { + "bbox": [ + 313, + 53, + 557, + 547 + ], + "lines": [ + { + "bbox": [ + 313, + 53, + 557, + 547 + ], + "spans": [ + { + "bbox": [ + 313, + 53, + 557, + 547 + ], + "type": "image", + "image_path": "01a5c0345962b2f97cd0ef64d890f1c98d4af1b99d38c5a98823b19ce41cfe61.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 309, + 555, + 430, + 567 + ], + "lines": [ + { + "bbox": [ + 309, + 555, + 430, + 567 + ], + "spans": [ + { + "bbox": [ + 309, + 555, + 430, + 567 + ], + "type": "text", + "content": "Fig. 1. The anatomy of this survey." 
+ } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 307, + 592, + 564, + 749 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 592, + 564, + 749 + ], + "spans": [ + { + "bbox": [ + 307, + 592, + 564, + 749 + ], + "type": "text", + "content": "ings inherent in traditional approaches. Deep neural networks (DNNs) possess the capacity to autonomously learn complex patterns and hierarchical representations from raw data, thereby obviating the need for labor-intensive feature engineering [23]. This characteristic is particularly advantageous in the detection of subtle and multifaceted anomalies that might elude traditional methods [24]. By leveraging their multilayered architectures, deep learning models excel in processing high-dimensional and unstructured data, such as images, videos, and text, which are often challenging for conventional methods to handle effectively [25]. These models are adept at capturing non-linear relationships and interactions within the data, offering a more flexible and robust framework for AD" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 47, + 23, + 261, + 33 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 23, + 261, + 33 + ], + "spans": [ + { + "bbox": [ + 47, + 23, + 261, + 33 + ], + "type": "text", + "content": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. X, XXX 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 45, + 55, + 301, + 90 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 55, + 301, + 90 + ], + "spans": [ + { + "bbox": [ + 45, + 55, + 301, + 90 + ], + "type": "text", + "content": "[26]. Consequently, there has been a significant shift away from purely traditional AD techniques towards the adoption of deep learning methodologies." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 45, + 91, + 301, + 222 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 91, + 301, + 222 + ], + "spans": [ + { + "bbox": [ + 45, + 91, + 301, + 222 + ], + "type": "text", + "content": "Nonetheless, it is crucial to acknowledge that traditional AD models retain certain advantages, notably in their simplicity, interpretability, and lower computational overheads [27]. These characteristics make them particularly appealing in scenarios where model transparency and computational efficiency are crucial. In recognition of these strengths, Section V of this paper will introduce and discuss various existing approaches that integrate traditional methods with deep learning techniques. These hybrid methods aim to leverage the strengths of both paradigms, resulting in more robust and efficient AD systems." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 236, + 205, + 248 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 236, + 205, + 248 + ], + "spans": [ + { + "bbox": [ + 45, + 236, + 205, + 248 + ], + "type": "text", + "content": "B. 
Comparison With Existing Surveys" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 45, + 251, + 302, + 550 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 251, + 302, + 550 + ], + "spans": [ + { + "bbox": [ + 45, + 251, + 302, + 550 + ], + "type": "text", + "content": "In recent years, the field of AD has seen a surge in research, particularly with the advent of deep learning methods. Numerous surveys have been published, each attempting to provide a comprehensive overview of the field. However, many of these surveys focus on broader historical developments or cover deep learning techniques only up to a certain point in time. For example, surveys such as [19], [28], [29], and [23] primarily cover techniques developed up to 2020. While these surveys are valuable, they do not reflect the most recent advancements in the field. Furthermore, specific models such as Generative Adversarial Network (GAN)-based AD have been explored in-depth by studies [30], [31], [32], [33], and [34]. However, these studies primarily address foundational approaches and lack coverage of advanced techniques like conditional GANs, cycle-consistent GANs, and GANs integrated with self-supervised learning. Emerging hybrid models, combining GANs with Variational Autoencoders (VAEs) or autoencoders for improved robustness, are also underrepresented. In contrast, our survey covers the literature from 2019 to 2024, providing a timely and comprehensive overview of the latest advancements. By focusing on recent trends and evolving techniques, including enhanced architectures and hybrid frameworks, our work offers a more current perspective, bridging existing gaps and guiding future research directions in AD." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 564, + 178, + 575 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 564, + 178, + 575 + ], + "spans": [ + { + "bbox": [ + 46, + 564, + 178, + 575 + ], + "type": "text", + "content": "C. Contributions and Structure" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 45, + 579, + 300, + 674 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 579, + 300, + 674 + ], + "spans": [ + { + "bbox": [ + 45, + 579, + 300, + 674 + ], + "type": "text", + "content": "This survey systematically reviews over 160 recent research papers on AD, including publications from leading journals (IEEE, ACM, Springer, Elsevier) and top-tier conferences (AAAI, CCS, ICCV) spanning from 2019 to 2024. By focusing on cutting-edge advancements in deep learning-based methods, this survey ensures a comprehensive and up-to-date overview of the field. The contributions of this survey are summarized as follows:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 677, + 301, + 750 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 677, + 301, + 750 + ], + "spans": [ + { + "bbox": [ + 56, + 677, + 301, + 750 + ], + "type": "text", + "content": "- This survey addresses gaps in prior surveys by highlighting advanced techniques that were previously underexplored, including conditional GANs, cycle-consistent GANs, and hybrid frameworks combining GANs with VAEs. These models are introduced and analyzed to demonstrate their strengths and weaknesses." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 320, + 55, + 564, + 125 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 55, + 564, + 125 + ], + "spans": [ + { + "bbox": [ + 320, + 55, + 564, + 125 + ], + "type": "text", + "content": "- This survey provides a detailed comparison of reconstruction-based and prediction-based methods. To enhance clarity and usability, we summarize key strengths, weaknesses, and applications in structured tables, offering readers insights into the trade-offs of different models." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 320, + 126, + 564, + 196 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 126, + 564, + 196 + ], + "spans": [ + { + "bbox": [ + 320, + 126, + 564, + 196 + ], + "type": "text", + "content": "- Recognizing the strengths of traditional methods, this survey explores their integration with deep learning models. Hybrid approaches, such as clustering, normalizing flows, and support vector data descriptions combined with deep learning, are analyzed to address complex challenges in AD." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 308, + 201, + 566, + 370 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 201, + 566, + 370 + ], + "spans": [ + { + "bbox": [ + 308, + 201, + 566, + 370 + ], + "type": "text", + "content": "The organization of this survey is shown in Fig.1. Section II provides an overview of data characteristics and anomaly types, followed by a discussion of common data processing challenges and mitigation strategies critical to effective AD. Section III explores the related applications of AD. Section IV categorizes and analyzes deep learning methods for AD, highlighting their effectiveness and limitations. Section V discusses the integration of traditional methods with deep learning, including clustering methods, normalizing flows, and support vector data descriptions. Section VI highlights open issues and future directions, such as challenges in data collection, computational complexity, explainability, and handling diverse anomaly types. Finally, Section VII concludes the survey with a summary and potential directions for future research." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 332, + 388, + 539, + 399 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 332, + 388, + 539, + 399 + ], + "spans": [ + { + "bbox": [ + 332, + 388, + 539, + 399 + ], + "type": "text", + "content": "II. DATA CHARACTERISTICS AND CHALLENGES" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 308, + 408, + 507, + 421 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 408, + 507, + 421 + ], + "spans": [ + { + "bbox": [ + 308, + 408, + 507, + 421 + ], + "type": "text", + "content": "A. Overview of Input Data and Anomaly Types" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 307, + 426, + 566, + 750 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 426, + 566, + 750 + ], + "spans": [ + { + "bbox": [ + 307, + 426, + 566, + 750 + ], + "type": "text", + "content": "In AD, input data presents unique challenges due to its structure, dimensionality, and temporal nature. Different types of data require specialized techniques to effectively identify anomalies, and the nature of anomalies themselves can vary greatly depending on the domain and data format [28]. 
For instance, visual data such as images and videos may exhibit anomalies associated with spatial or temporal inconsistencies, while time series data often involves anomalies related to trends or sudden changes in values over time. To better understand these variations, we first categorize data into textual, audio, image, video, and tabular formats, highlighting their respective characteristics and the challenges they pose for AD. Beyond this classification, data can also be viewed through the lens of temporal dependencies, distinguishing between time-series data, which captures sequential patterns over time, and nontemporal data, where observations are independent of temporal order. This dual perspective provides a comprehensive framework for analyzing how different types of anomalies manifest across various data formats. Furthermore, the nature of anomalies themselves can vary depending on the data format. Point anomalies, sequence anomalies, and outliers may all manifest differently across different data types and structures. Understanding these distinctions is essential for selecting the appropriate AD techniques [29], as a deep understanding of data characteristics and anomaly types ensures that detection methods are effectively tailored to capture the specific behaviors and patterns indicative of anomalies." } ] } ], "index": 14 } ], "discarded_blocks": [ { "bbox": [ 47, 23, 261, 33 ], "type": "header", "angle": 0, "lines": [ { "bbox": [ 47, 23, 261, 33 ], "spans": [ { "bbox": [ 47, 23, 261, 33 ], "type": "text", "content": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. X, XXX 2025" } ] } ], "index": 0 }, { "bbox": [ 558, 24, 563, 32 ], "type": "page_number", "angle": 0, "lines": [ { "bbox": [ 558, 24, 563, 32 ], "spans": [ { "bbox": [ 558, 24, 563, 32 ], "type": "text", "content": "3" } ] } ], "index": 1 } ], "page_size": [ 612, 792 ], "page_idx": 2 }, { "para_blocks": [ { "bbox": [ 56, 55, 194, 67 ], "type": "title", "angle": 0, "lines": [ { "bbox": [ 56, 55, 194, 67 ], "spans": [ { "bbox": [ 56, 55, 194, 67 ], "type": "text", "content": "1) Categorization by Data Type:" } ] } ], "index": 2 }, { "bbox": [ 56, 71, 301, 178 ], "type": "text", "angle": 0, "lines": [ { "bbox": [ 56, 71, 301, 178 ], "spans": [ { "bbox": [ 56, 71, 301, 178 ], "type": "text", "content": "- Textual Data: Textual data consists of sequences of discrete symbols, such as characters, words, or phrases, structured in a linear format. Unlike other data types, textual data conveys information through syntactic and semantic relationships. It can be found in various forms, including documents, chat messages, emails, and system logs. Anomalies in textual data may appear as irregular word sequences, syntactic inconsistencies, missing or misplaced words, or semantically incoherent phrases." } ] } ], "index": 3 }, { "bbox": [ 56, 178, 301, 310 ], "type": "text", "angle": 0, "lines": [ { "bbox": [ 56, 178, 301, 310 ], "spans": [ { "bbox": [ 56, 178, 301, 310 ], "type": "text", "content": "- Audio Data: Audio data captures variations in amplitude and frequency over time, representing spoken language, environmental sounds, or machine signals. 
It can be stored as waveforms or transformed into frequency-domain representations like spectrograms. Unlike textual data, audio data is continuous and often requires spectral analysis to extract meaningful patterns. Anomalies in audio data manifest as unexpected distortions, unusual frequency shifts, missing segments, or abnormal sound patterns caused by malfunctioning equipment, altered speech, or environmental noise." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 310, + 301, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 310, + 301, + 430 + ], + "spans": [ + { + "bbox": [ + 56, + 310, + 301, + 430 + ], + "type": "text", + "content": "- Image Data: Image data consists of two-dimensional pixel grids, where each pixel represents intensity or color information. Unlike sequential data, image data encodes spatial relationships, capturing textures, shapes, and patterns. Image anomalies often appear as distortions, irregular textures, missing components, or unexpected objects that deviate from normal patterns. For instance, these can result from manufacturing defects, medical imaging errors, or environmental changes in satellite imagery." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 430, + 301, + 561 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 430, + 301, + 561 + ], + "spans": [ + { + "bbox": [ + 56, + 430, + 301, + 561 + ], + "type": "text", + "content": "- Video Data: Video data extends image data by incorporating a temporal dimension, forming sequences of frames over time. Each frame within a video is an image, and the relationships between frames capture motion and dynamic interactions [35]. Unlike static images, video data requires modeling temporal dependencies, making AD more complex. Anomalies in video data include irregular movements, unexpected scene transitions, or unusual object behaviors, which are commonly observed in surveillance footage, traffic monitoring, and activity recognition." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 561, + 301, + 681 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 561, + 301, + 681 + ], + "spans": [ + { + "bbox": [ + 56, + 561, + 301, + 681 + ], + "type": "text", + "content": "- Tabular Data: Tabular data consists of structured records organized in rows and columns, where each row represents an entity or event, and each column corresponds to an attribute. This type of data is widely used in databases, spreadsheets, financial records, and sensor logs. Unlike the other data types, tabular data can contain numerical, categorical, or mixed-format information. Anomalies in tabular data include missing values, unexpected categorical labels, numerical outliers, or inconsistent relationships between attributes." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 685, + 254, + 696 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 685, + 254, + 696 + ], + "spans": [ + { + "bbox": [ + 56, + 685, + 254, + 696 + ], + "type": "text", + "content": "2) Categorization by Temporal Characteristics:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 700, + 301, + 750 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 700, + 301, + 750 + ], + "spans": [ + { + "bbox": [ + 56, + 700, + 301, + 750 + ], + "type": "text", + "content": "- Time-based data: Time-based data can be represented as a sequence of observations recorded over time, and it may consist of either a single variable (univariate) or multiple variables (multivariate). We can generalize the" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 328, + 55, + 564, + 390 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 328, + 55, + 564, + 390 + ], + "spans": [ + { + "bbox": [ + 328, + 55, + 564, + 390 + ], + "type": "text", + "content": "representation of both univariate and multivariate time series using the following formula: " + }, + { + "bbox": [ + 328, + 55, + 564, + 390 + ], + "type": "inline_equation", + "content": "X = \\{x_{t,j}\\}_{t\\in T,j\\in J}" + }, + { + "bbox": [ + 328, + 55, + 564, + 390 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 328, + 55, + 564, + 390 + ], + "type": "inline_equation", + "content": "t\\in T" + }, + { + "bbox": [ + 328, + 55, + 564, + 390 + ], + "type": "text", + "content": " denotes the time index, with " + }, + { + "bbox": [ + 328, + 55, + 564, + 390 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 328, + 55, + 564, + 390 + ], + "type": "text", + "content": " representing a specific time step and " + }, + { + "bbox": [ + 328, + 55, + 564, + 390 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 328, + 55, + 564, + 390 + ], + "type": "text", + "content": " being the set of all time steps in the dataset. Similarly, " + }, + { + "bbox": [ + 328, + 55, + 564, + 390 + ], + "type": "inline_equation", + "content": "j\\in J" + }, + { + "bbox": [ + 328, + 55, + 564, + 390 + ], + "type": "text", + "content": " represents the dimension or variable index, where " + }, + { + "bbox": [ + 328, + 55, + 564, + 390 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 328, + 55, + 564, + 390 + ], + "type": "text", + "content": " refers to a particular variable and " + }, + { + "bbox": [ + 328, + 55, + 564, + 390 + ], + "type": "inline_equation", + "content": "J" + }, + { + "bbox": [ + 328, + 55, + 564, + 390 + ], + "type": "text", + "content": " is the set of all variables or dimensions in the data. When " + }, + { + "bbox": [ + 328, + 55, + 564, + 390 + ], + "type": "inline_equation", + "content": "|J| = 1" + }, + { + "bbox": [ + 328, + 55, + 564, + 390 + ], + "type": "text", + "content": ", the series is univariate, meaning there is only one variable observed over time. In contrast, when " + }, + { + "bbox": [ + 328, + 55, + 564, + 390 + ], + "type": "inline_equation", + "content": "|J| > 1" + }, + { + "bbox": [ + 328, + 55, + 564, + 390 + ], + "type": "text", + "content": ", the series is multivariate, indicating that multiple variables are recorded simultaneously at each time step. 
Each observation " + }, + { + "bbox": [ + 328, + 55, + 564, + 390 + ], + "type": "inline_equation", + "content": "x_{t,j}" + }, + { + "bbox": [ + 328, + 55, + 564, + 390 + ], + "type": "text", + "content": " corresponds to the value of the " + }, + { + "bbox": [ + 328, + 55, + 564, + 390 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 328, + 55, + 564, + 390 + ], + "type": "text", + "content": "-th variable at time " + }, + { + "bbox": [ + 328, + 55, + 564, + 390 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 328, + 55, + 564, + 390 + ], + "type": "text", + "content": ". Among the five previously introduced data types, audio, video, and certain types of textual and tabular data are inherently time-based. Audio data is naturally sequential, with sound signals evolving over time, making anomalies such as distortions or frequency shifts dependent on temporal patterns. Video data extends image sequences over time, requiring the detection of abnormal object movements, scene transitions, or motion inconsistencies. Textual data, such as streaming logs, system event records, or chat conversations, also exhibits temporal dependencies, where anomalies may appear as unexpected event sequences or irregular timing between log entries. Similarly, tabular data in the form of financial transactions, sensor readings, or stock prices follows a time-series format, where anomalies may indicate fraud, equipment failure, or unusual market behaviors." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 320, + 390, + 564, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 390, + 564, + 665 + ], + "spans": [ + { + "bbox": [ + 320, + 390, + 564, + 665 + ], + "type": "text", + "content": "- Non-temporal data: Non-temporal data refers to observations that lack a temporal sequence, where the relationships between data points are independent of time. Such data is prevalent across industries that rely on static datasets or event-based observations. AD in non-temporal data focuses on identifying irregularities by analyzing data characteristics, patterns, or statistical properties rather than temporal dependencies. This process is crucial for uncovering hidden risks, fraudulent activities, or system malfunctions in contexts where time is not a defining factor. Among the five data types, image and certain types of tabular data are the most common forms of non-temporal data. Image data, such as medical scans, industrial defect detection images, or satellite photos, captures spatial relationships but does not depend on a temporal sequence. Anomalies in such data typically appear as unusual textures, distortions, or unexpected objects. Tabular data, when not used for time-series analysis, is also non-temporal, such as customer records, product attributes, or static financial datasets. In these cases, AD focuses on identifying outliers, inconsistencies, or unusual relationships between different features rather than changes over time." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 319, + 670, + 419, + 682 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 319, + 670, + 419, + 682 + ], + "spans": [ + { + "bbox": [ + 319, + 670, + 419, + 682 + ], + "type": "text", + "content": "3) Types of Anomalies:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 319, + 689, + 564, + 748 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 319, + 689, + 564, + 736 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 319, + 689, + 564, + 736 + ], + "spans": [ + { + "bbox": [ + 319, + 689, + 564, + 736 + ], + "type": "text", + "content": "- Point Anomalies: A single data point deviates significantly from the expected behavior in the dataset. These are common across both time-based and non-time-based data, representing sudden outliers or unusual values." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 319, + 736, + 564, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 319, + 736, + 564, + 748 + ], + "spans": [ + { + "bbox": [ + 319, + 736, + 564, + 748 + ], + "type": "text", + "content": "- Contextual Anomalies: A data point is considered" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 47, + 23, + 261, + 33 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 23, + 261, + 33 + ], + "spans": [ + { + "bbox": [ + 47, + 23, + 261, + 33 + ], + "type": "text", + "content": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. X, XXX 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 558, + 25, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 25, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 558, + 25, + 563, + 32 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 65, + 55, + 301, + 114 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 55, + 301, + 114 + ], + "spans": [ + { + "bbox": [ + 65, + 55, + 301, + 114 + ], + "type": "text", + "content": "anomalous only when it is analyzed within a specific context or surrounding data. In time-based data, this could involve seasonal trends or time-of-day variations, whereas in non-time-based data, it could depend on relationships between variables." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 57, + 115, + 301, + 246 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 57, + 115, + 301, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 115, + 301, + 175 + ], + "spans": [ + { + "bbox": [ + 57, + 115, + 301, + 175 + ], + "type": "text", + "content": "- Subsequence Anomalies: A contiguous sequence of data points behaves abnormally, typically found in time series data. These anomalies are significant when the temporal order of data points plays a key role in detecting deviations from expected patterns." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 57, + 175, + 301, + 246 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 175, + 301, + 246 + ], + "spans": [ + { + "bbox": [ + 57, + 175, + 301, + 246 + ], + "type": "text", + "content": "- Cluster-based and Correlation Anomalies: Anomalies that occur when a group of data points, or relationships between variables, deviate from expected patterns. 
This is more prominent in non-time-based data, where detecting irregular clusters or correlations between features is essential for AD." + } + ] + } + ], + "index": 4 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 46, + 265, + 130, + 276 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 265, + 130, + 276 + ], + "spans": [ + { + "bbox": [ + 46, + 265, + 130, + 276 + ], + "type": "text", + "content": "B. Data Processing" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 45, + 281, + 301, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 281, + 301, + 460 + ], + "spans": [ + { + "bbox": [ + 45, + 281, + 301, + 460 + ], + "type": "text", + "content": "Effective AD requires careful preparation and preprocessing of input data to ensure that detection algorithms can operate effectively. In many cases, raw data contains inherent challenges that can significantly hinder the performance of AD models. These challenges arise from the complexity of real-world data, including high dimensionality, missing or sparse values, skewed class distributions, and noise that can obscure true anomalies. Without addressing these issues, AD methods may struggle to accurately identify rare or subtle deviations in the data, leading to false positives, missed anomalies, or inefficient computations. Therefore, appropriate data preprocessing steps are crucial for improving detection accuracy, robustness, and overall system reliability. This subsection outlines some of the most common data processing issues and their implications for AD, along with strategies to mitigate these challenges." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 45, + 460, + 301, + 750 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 45, + 460, + 301, + 687 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 460, + 301, + 687 + ], + "spans": [ + { + "bbox": [ + 45, + 460, + 301, + 687 + ], + "type": "text", + "content": "1) Dimensionality: High-dimensional data makes AD more complex due to the \"curse of dimensionality\". As datasets expand in size and complexity—particularly with the rise of \"big data\", characterized by large-scale, high-velocity data generated from diverse sources, it becomes increasingly difficult for AD methods to maintain accuracy [36]. To address this issue, dimensionality reduction is a common approach that transforms a large set of input features into a smaller, more focused feature set [37]. While traditional methods such as PCA [38] are frequently used, they may struggle to capture nonlinear relationships in complex data. For instance, Sakurada et al. [39] compare autoencoders, which perform non-linear dimensionality reduction, with linear PCA and kernel PCA on both synthetic and real-world datasets. The study reveals that on the nonlinear and high-dimensional synthetic Lorenz dataset, AE achieved a relative AUC improvement of " + }, + { + "bbox": [ + 45, + 460, + 301, + 687 + ], + "type": "inline_equation", + "content": "26.83\\%" + }, + { + "bbox": [ + 45, + 460, + 301, + 687 + ], + "type": "text", + "content": " compared to linear PCA. This highlights that autoencoders can even detect anomalies in data with relatively high intrinsic dimensionality, where linear PCA struggles to perform." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 45, + 689, + 301, + 750 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 689, + 301, + 750 + ], + "spans": [ + { + "bbox": [ + 45, + 689, + 301, + 750 + ], + "type": "text", + "content": "2) Sparsity: Sparse data, where many values are missing or incomplete, poses significant challenges for AD. Sparse datasets can lead to reduced detection accuracy, as missing or incomplete data points may obscure the underlying patterns necessary for detecting anomalies [36]. Cheng et al." + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 307, + 54, + 564, + 233 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 54, + 564, + 233 + ], + "spans": [ + { + "bbox": [ + 307, + 54, + 564, + 233 + ], + "type": "text", + "content": "[40] highlight that in high-dimensional settings, the sparsity problem is further amplified as the data becomes more spread out, increasing the risk of missing critical information that signals anomalies. To address these challenges, Li et al. [41] propose an improved low-rank and sparse decomposition model (LSDM) for hyperspectral AD. Their approach models sparse components as a Gaussian Mixture (MoG), effectively capturing anomalous patterns within complex datasets by leveraging the low-rank structure. In contrast, Han et al. [42] take a different approach by introducing sparse autoencoders to learn sparse latent representations from high-dimensional input data. Through experiments on three real-world cyber-physical system datasets, the study shows that mining sparse latent patterns from high-dimensional time series can significantly improve the robustness of AD models." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 308, + 234, + 564, + 748 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 308, + 234, + 564, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 234, + 564, + 449 + ], + "spans": [ + { + "bbox": [ + 308, + 234, + 564, + 449 + ], + "type": "text", + "content": "3) Class Imbalance: In most AD tasks, the occurrence of anomalies is significantly rarer than normal data points, resulting in a class imbalance problem. This imbalance can cause detection algorithms to be overly biased toward the majority class (normal data), leading to a higher rate of false negatives where critical anomalies are missed. In imbalanced datasets, it is often possible to achieve an overall high accuracy, while the recall score for the minority class (anomalies) remains very low [43]. Traditional methods to mitigate this issue involve oversampling the minority class or undersampling the majority class [44]. Recent research has increasingly focused on introducing Data Generation Models (DGM) to improve the representation of the minority class in AD. For instance, Dlamini et al. [45] use Conditional Generative Adversarial Networks (CGANs) to generate synthetic samples for the minority class and combines this with KL divergence to guide the model in accurately learning the distribution of the minority class." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 308, + 449, + 564, + 676 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 449, + 564, + 676 + ], + "spans": [ + { + "bbox": [ + 308, + 449, + 564, + 676 + ], + "type": "text", + "content": "4) Noise in Data: Noise refers to random or irrelevant information present in the data, which can obscure true anomalies and lead to false positives. In addition, during the training process of AD models, the high complexity of the model and the presence of noisy data can lead to overfitting, where the model inadvertently learns to fit the reconstruction error from noisy inputs rather than focusing on genuine anomalies [46]. To reduce the impact of noisy data, Zhang et al. [47] incorporate a Maximum Mean Discrepancy (MMD) to encourage the distribution of low-dimensional representations to approximate a target distribution. The goal is to align the distribution of noisy data with that of normal training data, thereby reducing the risk of overfitting. Furthermore, Chen et al. [48] propose a novel method called Noise Modulated Adversarial Learning, where noise images from a predefined normal distribution are fed into the discriminator network as negative samples. This adversarial process modulates the training of the reconstruction network, balancing the learning between the two networks to improve robustness against noise." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 308, + 677, + 564, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 677, + 564, + 748 + ], + "spans": [ + { + "bbox": [ + 308, + 677, + 564, + 748 + ], + "type": "text", + "content": "5) Privacy of data: In many fields, such as healthcare, finance, and cybersecurity, data used for AD often contains sensitive or personal information. Ensuring the privacy and security of this data is paramount, as improper handling could lead to serious legal and ethical violations. Hassan et al. [49] conducte an in-depth investigation into the privacy of AD" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 47, + 23, + 261, + 33 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 23, + 261, + 33 + ], + "spans": [ + { + "bbox": [ + 47, + 23, + 261, + 33 + ], + "type": "text", + "content": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. X, XXX 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 45, + 55, + 301, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 55, + 301, + 236 + ], + "spans": [ + { + "bbox": [ + 45, + 55, + 301, + 236 + ], + "type": "text", + "content": "models in blockchain technology. To address these privacy concerns, Federated Learning (FL), a distributed machine learning paradigm, has emerged as a promising supplement to AD [50]. FL allows distributed clients to collaboratively train a shared model while protecting the privacy of their local data. For example, Idrissi et al. [51] propose Fed-ANIDS, which leverages FL to address the privacy issues associated with centralized Network Intrusion Detection Systems (NIDS). 
This model was applied to various settings and popular datasets, demonstrating its ability to achieve high performance while preserving the privacy of distributed client data. Cui et al. [52] further introduce GAN into FL and design a new algorithm model that injects controllable noise into local model parameters, ensuring both AD utility and compliance with differential privacy requirements." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 109, + 248, + 237, + 258 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 109, + 248, + 237, + 258 + ], + "spans": [ + { + "bbox": [ + 109, + 248, + 237, + 258 + ], + "type": "text", + "content": "III. RELATED APPLICATIONS" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 262, + 301, + 515 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 262, + 301, + 515 + ], + "spans": [ + { + "bbox": [ + 45, + 262, + 301, + 515 + ], + "type": "text", + "content": "With the rapid advancement of deep learning models, AD has become more efficient and adaptable. These sophisticated models have been widely applied across various domains, enhancing the ability to identify irregular patterns in complex and high-dimensional datasets. In the previous chapter, we categorized data based on temporal characteristics into time-series and non-time-series data. However, visual data presents unique challenges, detection requirements, and a wide range of applications, making it difficult to be strictly classified as either time-series or non-time-series data. It can be static (e.g., images) or dynamic (e.g., videos), where images are typically considered non-time-series data, while videos fall under time-series data. Visual data is extensively used in fields such as medical imaging, autonomous systems, and surveillance, where detecting anomalies requires specialized deep learning techniques that differ from traditional numerical or categorical data analysis. To better reflect its broad applications and distinct computational needs, we discuss visual data separately. Based on this classification, we will now explore the applications of deep learning in AD from three perspectives: time-series data, non-temporal data, and visual data." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 45, + 530, + 200, + 541 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 530, + 200, + 541 + ], + "spans": [ + { + "bbox": [ + 45, + 530, + 200, + 541 + ], + "type": "text", + "content": "A. Applications in Time Series Data" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 45, + 544, + 300, + 687 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 544, + 300, + 687 + ], + "spans": [ + { + "bbox": [ + 45, + 544, + 300, + 687 + ], + "type": "text", + "content": "Time series data, defined by its sequential nature over time, is fundamental to many systems where the temporal order of events critically influences analysis and decision-making processes. AD in time series data has become an indispensable technique across various industries, enabling the early detection of irregular patterns that may indicate underlying issues or emerging threats. The applications of time series AD are extensive, impacting critical areas such as traffic monitoring, power system management, and healthcare. In the following sections, we present how these applications leverage AD to enhance operational efficiency, ensure system reliability, and improve safety across these fields." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 45, + 689, + 301, + 750 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 689, + 301, + 750 + ], + "spans": [ + { + "bbox": [ + 45, + 689, + 301, + 750 + ], + "type": "text", + "content": "1) Traffic Monitoring: Time series AD plays a pivotal role in modern traffic management systems. As demonstrated in [53], real-time data from loop detection sensors are integrated and analyzed to predict traffic volume and enhance system safety. The ability to detect anomalies in traffic patterns is" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 307, + 55, + 564, + 247 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 55, + 564, + 247 + ], + "spans": [ + { + "bbox": [ + 307, + 55, + 564, + 247 + ], + "type": "text", + "content": "essential for anticipating and responding to potential incidents before they escalate. For instance, Li et al. [54] present a method that identifies traffic incidents by detecting anomalies in traffic time series data, thereby helping users avoid accidents and reduce travel time. Furthermore, high-speed driving is identified as a significant contributor to traffic accidents [55]. By monitoring and analyzing sudden increases in vehicle speed, AD techniques can predict and prevent accidents more effectively, providing a critical tool for improving road safety. Zhao et al. [56] further validate the efficacy of unsupervised AD methods in assessing elevated road traffic accident risks, specifically by analyzing volume and speed data from traffic on Yan'an elevated road. This approach enhances the ability to detect and respond to hazardous traffic conditions in real-time, underscoring the indispensable role of AD in traffic management." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 308, + 258, + 564, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 258, + 564, + 521 + ], + "spans": [ + { + "bbox": [ + 308, + 258, + 564, + 521 + ], + "type": "text", + "content": "2) Power System: AD is a vital element in ensuring the stability, security, and reliability of electrical grids. By continuously monitoring grid data, these techniques can swiftly identify deviations from normal operational patterns, which may indicate issues such as natural faults or malicious cyberattacks. The ability to detect these anomalies in real-time is crucial for preventing potential outages and maintaining a consistent power supply. For instance, Li et al. [57] highlight that accurate and real-time AD can enhance grid stability by over " + }, + { + "bbox": [ + 308, + 258, + 564, + 521 + ], + "type": "inline_equation", + "content": "20\\%" + }, + { + "bbox": [ + 308, + 258, + 564, + 521 + ], + "type": "text", + "content": ", providing rapid response capabilities that significantly bolster the system's defense against both natural disruptions and cyber threats. Furthermore, the introduction of a residential electrical load AD framework, as demonstrated in [58], has been shown to significantly improve both load prediction accuracy and AD, thereby optimizing demand-side management (DSM) in residential areas. In terms of cybersecurity, the MENSA Intrusion Detection System (IDS) [59] has proven to be a formidable tool in smart grid environments, effectively detecting operational anomalies and classifying a wide range of cyberattacks. This capability not only protects critical infrastructure but also underscores the indispensable role of AD in modern power system management." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 308, + 533, + 564, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 533, + 564, + 748 + ], + "spans": [ + { + "bbox": [ + 308, + 533, + 564, + 748 + ], + "type": "text", + "content": "3) Healthcare: AD plays a crucial role in healthcare by enabling continuous monitoring of patient vital signs, such as heart rate and blood pressure, to swiftly identify abnormal conditions that may require urgent medical intervention. The application of AD in medical signal analysis is particularly important, as highlighted in [60], where the identification of data samples that deviate from the typical data distribution can reveal underlying issues such as noise, changes in a patient's condition, or the emergence of new and previously undetected medical conditions. This capability is essential for ensuring accurate diagnosis and timely patient care. Furthermore, Keeley et al. [61] demonstrate that AD algorithms can effectively identify irregularities in heart rate data, which not only facilitates faster emergency responses but also provides deeper insights into a patient's health status. This, in turn, enhances overall patient care while also reducing the cognitive load on healthcare professionals by automating the detection of potential issues." + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 47, + 23, + 261, + 33 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 23, + 261, + 33 + ], + "spans": [ + { + "bbox": [ + 47, + 23, + 261, + 33 + ], + "type": "text", + "content": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. X, XXX 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 558, + 25, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 25, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 558, + 25, + 563, + 32 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 55, + 208, + 67 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 55, + 208, + 67 + ], + "spans": [ + { + "bbox": [ + 46, + 55, + 208, + 67 + ], + "type": "text", + "content": "B. Applications in Non-temporal Data" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 45, + 72, + 301, + 192 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 72, + 301, + 192 + ], + "spans": [ + { + "bbox": [ + 45, + 72, + 301, + 192 + ], + "type": "text", + "content": "AD in non-temporal data plays a critical role in ensuring operational integrity, security, and financial stability. By focusing on identifying irregularities within independent events or static datasets, it addresses potential risks such as fraud, system failures, and malicious activities. Unlike time-series applications, non-temporal AD leverages data patterns and statistical analysis to uncover deviations that signal anomalies. In the following, we present specific applications across domains such as finance and cybersecurity, showcasing its significant impact on safeguarding critical systems and operations." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 193, + 301, + 372 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 193, + 301, + 372 + ], + "spans": [ + { + "bbox": [ + 45, + 193, + 301, + 372 + ], + "type": "text", + "content": "1) Finance: In the financial sector, non-temporal data AD is pivotal for identifying fraudulent transactions, credit scoring anomalies, and unusual trading activities. Unlike time series data, these financial fraud detection tasks often involve independent events, such as individual transactions or credit score evaluations, which do not rely on temporal sequences. Instead, the focus is on transaction characteristics and patterns that may indicate fraudulent behavior. Various data mining techniques, including SVM, Naïve Bayes, and Random Forest, are extensively employed to detect different forms of financial fraud, such as bank fraud, insurance fraud, financial statement fraud, and cryptocurrency fraud [62]. As highlighted by [63], AD is critical in quickly identifying activities that deviate from normal patterns, thereby enabling rapid intervention to minimize financial losses." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 45, + 373, + 302, + 661 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 373, + 302, + 661 + ], + "spans": [ + { + "bbox": [ + 45, + 373, + 302, + 661 + ], + "type": "text", + "content": "2) Cybersecurity: AD is a fundamental component of maintaining a secure and resilient cyberspace. As [64] points out, advanced security controls and resilience analysis are crucial during the early stages of system deployment to ensure long-term sustainability. AD plays a pivotal role in this process by identifying unauthorized access, malicious activities, and network intrusions that deviate from established norms. This capability is essential for safeguarding network security and preventing potential breaches. Early research in deep learning-based network intrusion detection focused on architectures such as Autoencoders (AE), Deep Belief Networks (DBN), and Recurrent Neural Networks (RNN) [24]. As deep learning technology has advanced, more sophisticated models have been developed for detecting anomalies in cybersecurity. For instance, Singh et al. [65] illustrate the benefits of AD in wide-area protection schemes (WAPS) by using a deep learning-based cyber-physical AD system (CPADS) to detect and mitigate data integrity and communication failure attacks in centralized Remedial Action Schemes (CRAS). Similarly, Nagarajan et al. [66] highlight the effectiveness of AD in enhancing the security of Cyber-Physical Systems (CPSs) by accurately identifying anomalous behaviors, thereby addressing the growing challenges posed by sophisticated cyberattacks and the increasing volume of data." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 683, + 176, + 696 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 683, + 176, + 696 + ], + "spans": [ + { + "bbox": [ + 46, + 683, + 176, + 696 + ], + "type": "text", + "content": "C. Applications in Visual Data" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 45, + 700, + 301, + 750 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 700, + 301, + 750 + ], + "spans": [ + { + "bbox": [ + 45, + 700, + 301, + 750 + ], + "type": "text", + "content": "AD in visual data, encompassing images and videos, plays a vital role in numerous industries where visual inspection is critical.
Applications range from detecting defects in manufacturing processes to identifying medical abnormalities in" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 307, + 55, + 564, + 149 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 55, + 564, + 149 + ], + "spans": [ + { + "bbox": [ + 307, + 55, + 564, + 149 + ], + "type": "text", + "content": "imaging, monitoring public safety through surveillance systems, and ensuring quality control in production lines. By leveraging advanced deep learning techniques, AD methods can automatically identify and analyze irregularities with high precision, reducing reliance on manual inspection and improving efficiency. In this section, we explore key applications of visual data-based AD, highlighting its transformative impact across various domains." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 307, + 150, + 564, + 461 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 150, + 564, + 461 + ], + "spans": [ + { + "bbox": [ + 307, + 150, + 564, + 461 + ], + "type": "text", + "content": "1) Medical Imaging: AD in medical imaging is indispensable across numerous medical specialties, playing a crucial role in the early detection and diagnosis of diseases. In radiology, it is employed to identify anomalies in X-rays [67], brain imaging [68], and CT scans [69], thereby aiding in the accurate diagnosis of various conditions. However, as [70] highlights, anomalies in medical images often closely resemble normal tissue, posing a significant challenge to detection due to their subtle differences. This similarity requires the use of sophisticated techniques to effectively distinguish between normal and anomalous data. For example, Draelos et al. [71] demonstrate the power of machine learning in radiology, significantly enhancing the classification performance for multiple abnormalities in chest CT volumes, achieving an AUROC greater than 0.90 for 18 different abnormalities. Additionally, Shvetsova et al. [72] showcase a novel method for AD in medical images, which dramatically improves the detection of subtle abnormalities in complex, high-resolution images, such as chest X-rays and pathology slides—scenarios where traditional models often fail. Furthermore, Zhao et al. [73] introduce the SALAD framework, which enhances AD in medical images by utilizing self-supervised and translation-consistent features from normal data. This approach is particularly effective in situations where labeled anomalous images are scarce, thereby improving detection accuracy in challenging medical imaging tasks." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 308, + 461, + 564, + 750 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 461, + 564, + 750 + ], + "spans": [ + { + "bbox": [ + 308, + 461, + 564, + 750 + ], + "type": "text", + "content": "2) Video Monitoring: Video AD (VAD) has become increasingly crucial with the rise of large-scale multimedia data analysis, particularly in the processing of video data [74]. VAD focuses on identifying unusual patterns or behaviors in video footage that deviate from the norm, making it a vital tool in several domains. In security and surveillance, VAD is used to monitor public spaces, buildings, and secure areas, enabling the detection of suspicious activities, unauthorized access, and unusual crowd behaviors, thereby enhancing public safety [75]. 
In the realm of traffic monitoring, VAD facilitates the real-time identification of accidents and irregular traffic patterns, allowing for prompt response and management [76]. Additionally, VAD is applied in behavioral analysis to detect abnormal behaviors in various environments, such as schools, workplaces, and public transportation systems, contributing to the maintenance of safety and order. For example, Chen et al. [77] propose a bidirectional prediction framework specifically designed for AD in surveillance videos. This innovative approach employs forward and backward prediction subnetworks to predict the same target frame, constructing a loss function based on the real target frame and its bidirectional predictions. Experimental results demonstrate that this model outperforms existing approaches on various surveillance video datasets, including those featuring pedestrians and street scenes, showcas" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 47, + 23, + 261, + 33 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 23, + 261, + 33 + ], + "spans": [ + { + "bbox": [ + 47, + 23, + 261, + 33 + ], + "type": "text", + "content": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. X, XXX 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 45, + 55, + 301, + 78 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 55, + 301, + 78 + ], + "spans": [ + { + "bbox": [ + 45, + 55, + 301, + 78 + ], + "type": "text", + "content": "ing its superior performance in accurately detecting anomalies in real-world surveillance scenarios." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 71, + 92, + 275, + 114 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 92, + 275, + 114 + ], + "spans": [ + { + "bbox": [ + 71, + 92, + 275, + 114 + ], + "type": "text", + "content": "IV. DEEP LEARNING METHODS FOR ANOMALY DETECTION" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 119, + 301, + 227 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 119, + 301, + 227 + ], + "spans": [ + { + "bbox": [ + 45, + 119, + 301, + 227 + ], + "type": "text", + "content": "The application of deep learning to AD has revolutionized the way we identify irregularities in both time-based and non-time-based datasets [78]. Traditional methods, such as statistical analysis and clustering, have been commonly used to detect anomalies. However, these methods often struggle with high-dimensional data, complex relationships, and capturing intricate patterns. Deep learning models, with their ability to learn hierarchical representations and detect subtle anomalies, have emerged as powerful tools to overcome these limitations." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 45, + 227, + 301, + 299 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 227, + 301, + 299 + ], + "spans": [ + { + "bbox": [ + 45, + 227, + 301, + 299 + ], + "type": "text", + "content": "As shown in Fig.2, this section introduces three major deep learning approaches applied to AD: reconstruction-based methods, prediction-based methods, and hybrid approaches. Each approach leverages the strengths of deep learning in distinct ways to improve AD accuracy, particularly in scenarios where data patterns are complex, unstructured, or temporal." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 45, + 315, + 301, + 337 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 315, + 301, + 337 + ], + "spans": [ + { + "bbox": [ + 45, + 315, + 301, + 337 + ], + "type": "text", + "content": "A. Deep learning methods for Anomaly Detection based on Reconstruction" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 45, + 342, + 301, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 342, + 301, + 567 + ], + "spans": [ + { + "bbox": [ + 45, + 342, + 301, + 567 + ], + "type": "text", + "content": "Reconstruction-based approaches operate by training a model to learn the underlying distribution of normal data [79]. Once trained, the model attempts to reconstruct incoming data. The reconstruction error, which is the difference between the original data and its reconstruction, is then used as an indicator of anomaly. A high reconstruction error suggests that the data is anomalous, as it deviates from the learned normal patterns. Deep learning-based reconstructive models have become prominent due to their ability to capture complex patterns in high-dimensional data. In recent years, most reconstruction-based AD models have been developed using techniques such as GAN, AE, and diffusion models. These models each have unique strengths and weaknesses, as summarized in Table I. This table consolidates insights from multiple studies, including [80], [81], [82], and [83], which have analyzed the advantages and limitations of GANs, VAEs, and Diffusion Models in AD. In this section, we introduce these three types of models in the context of AD and discuss their various variants." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 45, + 569, + 301, + 750 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 569, + 301, + 750 + ], + "spans": [ + { + "bbox": [ + 45, + 569, + 301, + 750 + ], + "type": "text", + "content": "1) GAN-based Anomaly Detection: GANs are powerful tools for generating synthetic data that resembles a given training dataset [84]. As shown in the upper part of Fig.3, GANs consist of two main components: a generator and a discriminator, both of which are neural networks. Because of this structure, GAN models are highly flexible, allowing for different networks to be chosen as the generator and discriminator based on the specific task. This flexibility makes GANs a versatile framework for a wide range of applications. 
The generator " + }, + { + "bbox": [ + 45, + 569, + 301, + 750 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 45, + 569, + 301, + 750 + ], + "type": "text", + "content": " takes a random noise vector " + }, + { + "bbox": [ + 45, + 569, + 301, + 750 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 45, + 569, + 301, + 750 + ], + "type": "text", + "content": " (usually sampled from a Gaussian distribution) as input and generates synthetic data " + }, + { + "bbox": [ + 45, + 569, + 301, + 750 + ], + "type": "inline_equation", + "content": "G(z)" + }, + { + "bbox": [ + 45, + 569, + 301, + 750 + ], + "type": "text", + "content": ". The discriminator " + }, + { + "bbox": [ + 45, + 569, + 301, + 750 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 45, + 569, + 301, + 750 + ], + "type": "text", + "content": " receives a data sample (either from the real dataset or from the generator) as input and outputs a probability " + }, + { + "bbox": [ + 45, + 569, + 301, + 750 + ], + "type": "inline_equation", + "content": "D(x)" + }, + { + "bbox": [ + 45, + 569, + 301, + 750 + ], + "type": "text", + "content": ", representing the likelihood that the input is real (i.e., from the actual dataset) rather than fake (i.e.," + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 326, + 53, + 542, + 103 + ], + "blocks": [ + { + "bbox": [ + 326, + 53, + 542, + 103 + ], + "lines": [ + { + "bbox": [ + 326, + 53, + 542, + 103 + ], + "spans": [ + { + "bbox": [ + 326, + 53, + 542, + 103 + ], + "type": "image", + "image_path": "0582fc6497eafabed1bd3451b1290a6b09ece498ffa50d1156dd7f0120e507ef.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 326, + 114, + 542, + 167 + ], + "blocks": [ + { + "bbox": [ + 326, + 114, + 542, + 167 + ], + "lines": [ + { + "bbox": [ + 326, + 114, + 542, + 167 + ], + "spans": [ + { + "bbox": [ + 326, + 114, + 542, + 167 + ], + "type": "image", + "image_path": "880d55c7ec7dc69d5f5d9f1f2b32b3c2153e316b56c5560359f5852c0d17b74d.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 326, + 168, + 542, + 252 + ], + "blocks": [ + { + "bbox": [ + 326, + 168, + 542, + 252 + ], + "lines": [ + { + "bbox": [ + 326, + 168, + 542, + 252 + ], + "spans": [ + { + "bbox": [ + 326, + 168, + 542, + 252 + ], + "type": "image", + "image_path": "ab7aedd616ec4505c3fc87c94f267fe1e3ff25684d846743f04d6cdec18a9037.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 308, + 262, + 564, + 281 + ], + "lines": [ + { + "bbox": [ + 308, + 262, + 564, + 281 + ], + "spans": [ + { + "bbox": [ + 308, + 262, + 564, + 281 + ], + "type": "text", + "content": "Fig. 2. Three types of anomaly detection: (a) Reconstruction-based approache, (b) Prediction-based approache, (c) Hybrid method." 
+ } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 326, + 297, + 544, + 466 + ], + "blocks": [ + { + "bbox": [ + 326, + 297, + 544, + 466 + ], + "lines": [ + { + "bbox": [ + 326, + 297, + 544, + 466 + ], + "spans": [ + { + "bbox": [ + 326, + 297, + 544, + 466 + ], + "type": "image", + "image_path": "7509a938836ca50d09aa01364c29c0e376376912d0ca6818c1dc0151a063308a.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 308, + 475, + 514, + 486 + ], + "lines": [ + { + "bbox": [ + 308, + 475, + 514, + 486 + ], + "spans": [ + { + "bbox": [ + 308, + 475, + 514, + 486 + ], + "type": "text", + "content": "Fig. 3. Structural Frameworks for GAN Anomaly Detection." + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "bbox": [ + 307, + 509, + 564, + 629 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 509, + 564, + 629 + ], + "spans": [ + { + "bbox": [ + 307, + 509, + 564, + 629 + ], + "type": "text", + "content": "generated by the generator). The generator and discriminator are trained simultaneously through a process where the generator tries to produce data that can fool the discriminator, and the discriminator tries to improve its ability to distinguish between real and fake data. Table II provides a comprehensive summary of recent GAN-based AD models, categorizing them based on their techniques, approaches, strengths, and weaknesses. This table highlights how different GAN variants are tailored for specific AD tasks, along with the types of data they are applied to and their publication years." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 630, + 564, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 630, + 564, + 654 + ], + "spans": [ + { + "bbox": [ + 308, + 630, + 564, + 654 + ], + "type": "text", + "content": "The training process of GANs can be described as a minimax game with the following objective function:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 320, + 673, + 563, + 707 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 673, + 563, + 707 + ], + "spans": [ + { + "bbox": [ + 320, + 673, + 563, + 707 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\min _ {G} \\max _ {D} V (D, G) = \\mathbb {E} _ {x \\sim p _ {d a t a} (x)} [ \\log D (x) ] \\\\ + \\mathbb {E} _ {z \\sim p _ {z} (z)} [ \\log (1 - D (G (z))) ]. 
\\quad (1) \\\\ \\end{array}", + "image_path": "833120e573f186fcf6c195b1480a6112e6e1f60a22837b1a5310d21419b89868.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 712, + 564, + 750 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 712, + 564, + 750 + ], + "spans": [ + { + "bbox": [ + 308, + 712, + 564, + 750 + ], + "type": "text", + "content": "In this function, " + }, + { + "bbox": [ + 308, + 712, + 564, + 750 + ], + "type": "inline_equation", + "content": "p_{data}(x)" + }, + { + "bbox": [ + 308, + 712, + 564, + 750 + ], + "type": "text", + "content": " represents the distribution of the real data, " + }, + { + "bbox": [ + 308, + 712, + 564, + 750 + ], + "type": "inline_equation", + "content": "p_z(z)" + }, + { + "bbox": [ + 308, + 712, + 564, + 750 + ], + "type": "text", + "content": " represents the distribution of the noise vector " + }, + { + "bbox": [ + 308, + 712, + 564, + 750 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 308, + 712, + 564, + 750 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 308, + 712, + 564, + 750 + ], + "type": "inline_equation", + "content": "G(z)" + }, + { + "bbox": [ + 308, + 712, + 564, + 750 + ], + "type": "text", + "content": " is the data generated by the generator, and " + }, + { + "bbox": [ + 308, + 712, + 564, + 750 + ], + "type": "inline_equation", + "content": "D(x)" + }, + { + "bbox": [ + 308, + 712, + 564, + 750 + ], + "type": "text", + "content": " is the" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 47, + 23, + 261, + 33 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 23, + 261, + 33 + ], + "spans": [ + { + "bbox": [ + 47, + 23, + 261, + 33 + ], + "type": "text", + "content": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. X, XXX 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 58, + 83, + 552, + 167 + ], + "blocks": [ + { + "bbox": [ + 160, + 57, + 449, + 74 + ], + "lines": [ + { + "bbox": [ + 160, + 57, + 449, + 74 + ], + "spans": [ + { + "bbox": [ + 160, + 57, + 449, + 74 + ], + "type": "text", + "content": "TABLEI COMPARISON OF GANS, VAES, AND DIFFUSION MODELS IN ANOMALY DETECTION" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 58, + 83, + 552, + 167 + ], + "lines": [ + { + "bbox": [ + 58, + 83, + 552, + 167 + ], + "spans": [ + { + "bbox": [ + 58, + 83, + 552, + 167 + ], + "type": "table", + "html": "
ModelStrengthsWeaknesses
GANs• Capable of generating high-fidelity, realistic samples.\n• Learns complex data distributions using adversarial loss.\n• Useful in AD by distinguishing real vs. generated data.• Prone to mode collapse, leading to low sample diversity.\n• Hard to train with difficult-to-interpret losses.\n• Training is unstable and hard to converge.
VAEs• Easy to train with one tractable likelihood loss.\n• Provides high sample diversity by covering all data modes.\n• Latent space representation is useful for AD tasks.• Produces low-fidelity, often blurry samples.\n• Pixel-based loss leads to sample ambiguity and blurriness.
Diffusion Models• Generates high-fidelity samples with gradual refinement.\n• High sample diversity due to likelihood maximization.\n• Intermediate noisy images serve as useful latent codes for AD.• Slow sample generation due to the multi-step denoising process.\n• Computationally intensive, requiring many steps for both forward and reverse diffusion.
", + "image_path": "09194629b0767868e7df5246fea6e4a2179c838fa9818a8cc9e274fc26ee5ae9.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 187, + 301, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 187, + 301, + 258 + ], + "spans": [ + { + "bbox": [ + 45, + 187, + 301, + 258 + ], + "type": "text", + "content": "probability that " + }, + { + "bbox": [ + 45, + 187, + 301, + 258 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 45, + 187, + 301, + 258 + ], + "type": "text", + "content": " is real. The generator " + }, + { + "bbox": [ + 45, + 187, + 301, + 258 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 45, + 187, + 301, + 258 + ], + "type": "text", + "content": " aims to minimize this objective, while the discriminator " + }, + { + "bbox": [ + 45, + 187, + 301, + 258 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 45, + 187, + 301, + 258 + ], + "type": "text", + "content": " aims to maximize it. The discriminator updates its weights to maximize the probability of correctly classifying real and generated data, while the generator updates its weights to minimize the discriminator's ability to distinguish between real and fake data." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 45, + 258, + 301, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 258, + 301, + 510 + ], + "spans": [ + { + "bbox": [ + 45, + 258, + 301, + 510 + ], + "type": "text", + "content": "In the context of AD, GANs play crucial roles in both representation learning and data augmentation, each serving distinct purposes within deep Learning [85]. In representation learning, the primary objective of GANs is to learn and model the underlying distribution of the data, enabling the generation of synthetic data that closely resembles real data. This process involves a generator that creates fake data from random noise and a discriminator that distinguishes between real and fake data. Through iterative training, the generator improves its ability to produce realistic data, which is particularly useful in tasks like AD. For example, in [86], GANs are used for representation learning by generating fake data that matches the distribution of normal data. This generated data is then used to train a VAE to detect anomalies through reconstruction errors. Similarly, in [87], a fault-attention generative probabilistic adversarial autoencoder (FGPAA) is proposed, combining GANs and autoencoders for AD by learning the low-dimensional manifold of healthy state data. The GAN component aids in feature representation learning, reducing signal information loss and enhancing the model's ability to detect anomalies through distribution probability and reconstruction error." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 45, + 510, + 301, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 510, + 301, + 723 + ], + "spans": [ + { + "bbox": [ + 45, + 510, + 301, + 723 + ], + "type": "text", + "content": "There are two main structures to using GANs for AD, as shown in Fig.3. The first approach is based on the generator, as depicted in the lower part of Fig.3, highlighted by the yellow box. The basic idea is to train the GAN on normal data and then use the reconstruction error to identify anomalies. 
During the training phase, the GAN is trained exclusively on normal data, allowing the generator to learn to produce data that closely mimics the normal data distribution. During the detection phase, a test data point " + }, + { + "bbox": [ + 45, + 510, + 301, + 723 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 45, + 510, + 301, + 723 + ], + "type": "text", + "content": " is fed into the generator to obtain the reconstructed data " + }, + { + "bbox": [ + 45, + 510, + 301, + 723 + ], + "type": "inline_equation", + "content": "G(x)" + }, + { + "bbox": [ + 45, + 510, + 301, + 723 + ], + "type": "text", + "content": ". The reconstruction error, typically measured as the difference between the original data point " + }, + { + "bbox": [ + 45, + 510, + 301, + 723 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 45, + 510, + 301, + 723 + ], + "type": "text", + "content": " and the reconstructed data " + }, + { + "bbox": [ + 45, + 510, + 301, + 723 + ], + "type": "inline_equation", + "content": "G(x)" + }, + { + "bbox": [ + 45, + 510, + 301, + 723 + ], + "type": "text", + "content": ", is then used to detect anomalies. This can be quantified using metrics such as mean squared error (MSE). If the reconstruction error exceeds a predefined threshold, the data point is classified as an anomaly. The intuition behind this approach is that the generator, trained solely on normal data, will struggle to accurately reconstruct anomalous data, resulting in a high reconstruction error." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 45, + 724, + 301, + 749 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 724, + 301, + 749 + ], + "spans": [ + { + "bbox": [ + 45, + 724, + 301, + 749 + ], + "type": "text", + "content": "The mathematical representation for AD using GANs involves computing the reconstruction error " + }, + { + "bbox": [ + 45, + 724, + 301, + 749 + ], + "type": "inline_equation", + "content": "E(x)" + }, + { + "bbox": [ + 45, + 724, + 301, + 749 + ], + "type": "text", + "content": " as follows:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 389, + 198, + 563, + 212 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 198, + 563, + 212 + ], + "spans": [ + { + "bbox": [ + 389, + 198, + 563, + 212 + ], + "type": "interline_equation", + "content": "E (x) = \\| x - G (x) \\| ^ {2}, \\tag {2}", + "image_path": "a55eb6817213979a54e6dc36acf538fe4e0b2f0ec1f3c9f8bb2d32bba446d192.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 307, + 217, + 564, + 410 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 217, + 564, + 410 + ], + "spans": [ + { + "bbox": [ + 307, + 217, + 564, + 410 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 307, + 217, + 564, + 410 + ], + "type": "inline_equation", + "content": "\\| \\cdot \\| ^2" + }, + { + "bbox": [ + 307, + 217, + 564, + 410 + ], + "type": "text", + "content": " denotes the squared Euclidean distance. 
A threshold " + }, + { + "bbox": [ + 307, + 217, + 564, + 410 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 307, + 217, + 564, + 410 + ], + "type": "text", + "content": " is set, and if " + }, + { + "bbox": [ + 307, + 217, + 564, + 410 + ], + "type": "inline_equation", + "content": "E(x) > \\tau" + }, + { + "bbox": [ + 307, + 217, + 564, + 410 + ], + "type": "text", + "content": ", the data point " + }, + { + "bbox": [ + 307, + 217, + 564, + 410 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 307, + 217, + 564, + 410 + ], + "type": "text", + "content": " is considered an anomaly. For example, Dong et al. [88] propose a semi-supervised approach for video AD using a dual discriminator-based GAN structure, focusing on representation learning. In this approach, the generator predicts future frames for normal events, and anomalies are detected by evaluating the quality of these predictions. Similarly, Guo et al. [89] introduce RegraphGAN, a graph generative adversarial network specifically designed for dynamic graph AD. RegraphGAN utilizes GAN-based representation learning to encode complex spatiotemporal relationships in graph data, allowing it to better capture anomalies. By leveraging encoders to project input samples into a latent space and integrating GANs to enhance both training stability and efficiency, RegraphGAN significantly improves AD performance over existing methods." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 307, + 411, + 564, + 614 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 411, + 564, + 614 + ], + "spans": [ + { + "bbox": [ + 307, + 411, + 564, + 614 + ], + "type": "text", + "content": "The second approach leverages the discriminator highlighted by the green box in Fig.3. A well-trained discriminator has the ability to differentiate between real (normal) and fake (anomalous) samples. During the detection phase, test samples are directly input to the discriminator, which evaluates the likelihood that a given sample is real. If the discriminator assigns a low probability to a sample, suggesting that it is likely fake or anomalous, the sample is flagged as an anomaly. This method relies on the discriminator's capacity to recognize deviations from the normal data distribution it learned during training. For instance, Liu et al. [90] propose a GAN framework that uses multiple generators to produce potential outliers, which are then distinguished from normal data by a discriminator to detect anomalies. The discriminator's output score is used to evaluate the anomaly degree of input data, providing a comprehensive reference distribution and preventing mode collapse." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 307, + 616, + 564, + 749 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 616, + 564, + 749 + ], + "spans": [ + { + "bbox": [ + 307, + 616, + 564, + 749 + ], + "type": "text", + "content": "Additionally, GANs are highly effective in data augmentation, helping to mitigate the scarcity of anomaly samples, which often results in data imbalance and poor generalization [91]. When anomaly samples are unevenly distributed or lacking in diversity, models struggle to learn rare anomalies and can overfit to the training set, reducing their accuracy on unseen data. Traditional data augmentation techniques—such as scaling, rotation, random cropping, translation, flipping, and copy-paste—attempt to mitigate these issues. 
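The two detection schemes described above can be summarized in a few lines. The sketch below assumes a generator and discriminator already trained on normal data (the tiny untrained networks here are stand-ins) and scores test points by the reconstruction error of Eq. (2) or, alternatively, by the discriminator's probability output.

```python
import torch

@torch.no_grad()
def reconstruction_score(G, x):
    # Eq. (2): E(x) = ||x - G(x)||^2, computed per test sample.
    return ((x - G(x)) ** 2).sum(dim=1)

@torch.no_grad()
def discriminator_score(D, x):
    # Discriminator-based scheme: D(x) estimates the probability that x is
    # real, so a low D(x) (a high score here) flags a likely anomaly.
    return 1.0 - D(x).squeeze(1)

# Illustrative stand-ins; in practice these are networks trained on normal data.
G = torch.nn.Sequential(torch.nn.Linear(32, 64), torch.nn.ReLU(), torch.nn.Linear(64, 32))
D = torch.nn.Sequential(torch.nn.Linear(32, 1), torch.nn.Sigmoid())

x_test = torch.randn(8, 32)
scores = reconstruction_score(G, x_test)
tau = scores.quantile(0.95)  # e.g. a high percentile of scores on held-out normal data
print(scores > tau)          # True marks a point classified as anomalous
```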
However, simple linear transformations fail to capture new distributions and features of unknown anomalies, such as random changes in" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 47, + 23, + 261, + 33 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 23, + 261, + 33 + ], + "spans": [ + { + "bbox": [ + 47, + 23, + 261, + 33 + ], + "type": "text", + "content": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. X, XXX 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 45, + 55, + 301, + 330 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 55, + 301, + 330 + ], + "spans": [ + { + "bbox": [ + 45, + 55, + 301, + 330 + ], + "type": "text", + "content": "shape or texture. This is where GANs provide a significant advantage. By generating synthetic anomaly data that mimics the distribution of real-world anomalies, GANs enable models to learn a more diverse set of anomaly features. This not only addresses the imbalance problem but also improves the model's generalization capabilities, as it learns to detect anomalies based on a broader range of characteristics beyond those present in the original training dataset. Miao et al. [92] introduce an unsupervised AD framework that uses data augmentation through contrastive learning and GANs to mitigate overfitting. By employing a geometric distribution mask, it enhances data diversity and generates synthetic anomaly samples, addressing the scarcity of anomaly data. In [93], Anomaly-GAN addresses data augmentation by using a mask pool, anomaly-aware loss, and local-global discriminators to generate high-quality, realistic synthetic anomalies with diverse shapes, angles, spatial locations, and quantities in a controllable manner. Li et al. [94] propose an augmented time regularized generative adversarial network that combines an augmented filter layer and a novel temporal distance metric to generate high-quality and diverse artificial data, addressing the limitations of existing GAN approaches in handling limited training data and temporal order." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 45, + 330, + 301, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 330, + 301, + 544 + ], + "spans": [ + { + "bbox": [ + 45, + 330, + 301, + 544 + ], + "type": "text", + "content": "2) AE-based Anomaly Detection: In recent years, the limitations of traditional AE models in handling complex and noisy data have become more apparent, leading to the development of enhanced methods to improve their performance in AD tasks. For example, Fan et al. [97] introduce a new framework by incorporating " + }, + { + "bbox": [ + 45, + 330, + 301, + 544 + ], + "type": "inline_equation", + "content": "\ell_{2,1}" + }, + { + "bbox": [ + 45, + 330, + 301, + 544 + ], + "type": "text", + "content": "-norm into the AE, and experiments have demonstrated that this framework can significantly improve AD accuracy by increasing the model's robustness to noise and outliers during training. Wang et al.
[98] demonstrate that introducing an adaptive-weighted loss function can effectively suppress anomaly reconstruction, thereby improving the accuracy of AD. Liu et al. [99] introduce a multi-scale convolutional AE architecture, where multiple stacked convolutional encoder-decoder layers act as background learners to robustly eliminate anomalies of varying sizes during background reconstruction. Additionally, Lin et al. [100] introduce a soft calibration strategy combined with AE to address the issue of data contamination in AD." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 544, + 301, + 687 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 544, + 301, + 687 + ], + "spans": [ + { + "bbox": [ + 45, + 544, + 301, + 687 + ], + "type": "text", + "content": "VAEs are another generative model widely used in AD tasks. Like GANs, VAEs aim to learn the distribution of normal data to identify anomalies. However, unlike GANs, which rely on adversarial training between a generator and a discriminator, VAEs use an encoder-decoder architecture. Fig.4 illustrates the structure of AD based on VAE. The goal of a VAE is to map the input data into a latent space through the encoder and model the data distribution probabilistically within this space. This approach allows the VAE to generate new data that closely resembles the true data distribution, and anomalies can be detected by evaluating the reconstruction error." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 45, + 689, + 301, + 750 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 689, + 301, + 750 + ], + "spans": [ + { + "bbox": [ + 45, + 689, + 301, + 750 + ], + "type": "text", + "content": "The internal structure of a VAE is similar to that of a traditional AE but with some key differences. First, the encoder in a VAE not only compresses the input data into a lower-dimensional latent space but also learns a probabilistic distribution, typically parameterized by a mean " + }, + { + "bbox": [ + 45, + 689, + 301, + 750 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 45, + 689, + 301, + 750 + ], + "type": "text", + "content": " and a vari" + } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 326, + 53, + 544, + 159 + ], + "blocks": [ + { + "bbox": [ + 326, + 53, + 544, + 159 + ], + "lines": [ + { + "bbox": [ + 326, + 53, + 544, + 159 + ], + "spans": [ + { + "bbox": [ + 326, + 53, + 544, + 159 + ], + "type": "image", + "image_path": "fd57f4067bb6f3c6e9b25b5ccfc25170dec2d8afbbeb522aa5c35f6a09e5a7e3.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 308, + 166, + 512, + 177 + ], + "lines": [ + { + "bbox": [ + 308, + 166, + 512, + 177 + ], + "spans": [ + { + "bbox": [ + 308, + 166, + 512, + 177 + ], + "type": "text", + "content": "Fig. 4. Structural Frameworks for VAE Anomaly Detection." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 307, + 196, + 566, + 317 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 196, + 566, + 317 + ], + "spans": [ + { + "bbox": [ + 307, + 196, + 566, + 317 + ], + "type": "text", + "content": "ance " + }, + { + "bbox": [ + 307, + 196, + 566, + 317 + ], + "type": "inline_equation", + "content": "\\sigma^2" + }, + { + "bbox": [ + 307, + 196, + 566, + 317 + ], + "type": "text", + "content": " as shown in Fig.4. 
This enables the VAE to generate more meaningful latent variables " + }, + { + "bbox": [ + 307, + 196, + 566, + 317 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 307, + 196, + 566, + 317 + ], + "type": "text", + "content": ", enhancing the diversity and robustness of the generated data. A critical component introduced in VAEs is the Kullback-Leibler (KL) divergence, which measures the difference between the latent distribution generated by the encoder and a predefined prior distribution (usually a standard normal distribution). Unlike traditional AEs, which focus solely on minimizing the reconstruction error, VAEs are trained by minimizing a combination of the reconstruction error and the KL divergence:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 335, + 323, + 563, + 337 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 335, + 323, + 563, + 337 + ], + "spans": [ + { + "bbox": [ + 335, + 323, + 563, + 337 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\mathrm {V A E}} = \\mathbb {E} _ {q (z | x)} [ \\log p (x | z) ] - D _ {\\mathrm {K L}} (q (z | x) \\| p (z)). \\tag {3}", + "image_path": "6daf740dc031ec066115a38562e4dd3bb6bddacf671df3dc996ddae37458be6a.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 307, + 342, + 566, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 342, + 566, + 723 + ], + "spans": [ + { + "bbox": [ + 307, + 342, + 566, + 723 + ], + "type": "text", + "content": "This difference makes VAEs more powerful in AD because they not only consider the quality of the data reconstruction but also enforce a structured latent space through the KL divergence. By doing so, KL divergence helps to regularize the latent space, ensuring that the encoded representations are smoothly distributed and centered around the prior distribution. This regularization reduces overfitting, promotes better generalization, and makes it easier to distinguish between normal and anomalous data, especially in complex and high-dimensional datasets. Table III provides a comprehensive summary of the latest advancements in VAE-based AD models, showcasing innovative enhancements that address various challenges such as noise robustness, semantic feature learning, and anomaly reconstruction. Huang et al. [101] enhance VAE-based AD by incorporating an Autoencoding Transformation into the model, which ensures that the training phase effectively captures high-level visual semantic features of normal images, thereby increasing the anomaly score gap between normal and anomalous samples. Similarly, Yin et al. [102] utilize Convolutional Neural Network (CNN) and VAE with a two-stage sliding window approach in data preprocessing to learn better representations for AD tasks. Zhang Yin et al. [103] propose the Graph Relational Learning Network (GReLeN), which integrates a VAE structure with graph dependency learning for AD in multivariate time series through reconstruction. Zhou et al. [104] propose a variational long short-term memory (VLSTM) model for high-dimensional AD in imbalanced datasets, combining a compression network for efficient data representation with an estimation network for accurate classification of network traffic data. The VLSTM model balances data compression and feature retention using core LSTM and variational modules." 
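A minimal sketch of the machinery just described: a reparameterized Gaussian encoder, a decoder, and the objective of Eq. (3) written as a loss to minimize (reconstruction term plus KL divergence to a standard normal prior). The tiny linear networks and dimensions are illustrative placeholders, not a model from Table III.

```python
import torch
from torch import nn

class TinyVAE(nn.Module):
    """Minimal VAE sketch: the encoder outputs (mu, log sigma^2), sampling uses
    the reparameterization trick, and the loss is the negative of Eq. (3)."""
    def __init__(self, in_dim=32, latent=4):
        super().__init__()
        self.enc = nn.Linear(in_dim, 2 * latent)
        self.dec = nn.Linear(latent, in_dim)

    def forward(self, x):
        mu, logvar = self.enc(x).chunk(2, dim=1)
        z = mu + torch.exp(0.5 * logvar) * torch.randn_like(mu)  # reparameterize
        return self.dec(z), mu, logvar

    def loss(self, x):
        recon, mu, logvar = self(x)
        rec = ((x - recon) ** 2).sum(dim=1)   # stands in for -log p(x|z)
        # Closed-form KL(q(z|x) || N(0, I)) for a diagonal Gaussian posterior.
        kl = 0.5 * (mu.pow(2) + logvar.exp() - logvar - 1).sum(dim=1)
        return (rec + kl).mean()

@torch.no_grad()
def anomaly_score(vae, x):
    recon, _, _ = vae(x)
    return ((x - recon) ** 2).sum(dim=1)      # high reconstruction error => anomaly

vae = TinyVAE()
x = torch.randn(16, 32)
print(vae.loss(x).item(), anomaly_score(vae, x).max().item())
```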
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 308, + 724, + 564, + 749 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 724, + 564, + 749 + ], + "spans": [ + { + "bbox": [ + 308, + 724, + 564, + 749 + ], + "type": "text", + "content": "In recent years, many advancements in AD models inspired by VAEs have focused on Adversarial Autoencoders (AAEs)" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 47, + 23, + 261, + 33 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 23, + 261, + 33 + ], + "spans": [ + { + "bbox": [ + 47, + 23, + 261, + 33 + ], + "type": "text", + "content": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. X, XXX 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 47, + 81, + 570, + 463 + ], + "blocks": [ + { + "bbox": [ + 221, + 56, + 388, + 74 + ], + "lines": [ + { + "bbox": [ + 221, + 56, + 388, + 74 + ], + "spans": [ + { + "bbox": [ + 221, + 56, + 388, + 74 + ], + "type": "text", + "content": "TABLE II GAN-BASED MODELS IN ANOMALY DETECTION" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 47, + 81, + 570, + 463 + ], + "lines": [ + { + "bbox": [ + 47, + 81, + 570, + 463 + ], + "spans": [ + { + "bbox": [ + 47, + 81, + 570, + 463 + ], + "type": "table", + "html": "
PaperTechniqueApproach TypeStrengthWeaknessData TypeYear
[90]GANReconstructionDoes not depend on assumptions about the normal data and requires less computing resources.The method involves the selection of multiple hyperparameters, making the tuning process challenging and potentially time-consuming.Structured data2020
[48]GAN+CNNPredictionThe NM-GAN model enhances both the generalization and discrimination abilities through noise-modulated adversarial learning, resulting in improved accuracy and stability for video AD.The model struggles to fully capture complex temporal patterns like staying, wandering, and running, and lacks adaptive modulation of generalization and discrimination abilities, leaving room for improvement in spatiotemporal feature learning.Video data2021
[94]GANReconstructionIs capable of generating more effective artificial samples for training supervised learning models, thereby addressing the issue of data imbalance.Its performance is inferior to the baseline algorithms when the balanced ratio is 0.125.Image data2021
[95]GAN+LSTMPredictionThe TMANomaly framework excels in capturing complex multivariate correlations in industrial time series data, enhancing AD accuracy through mutual adversarial training.The paper lacks discussion on TMANomaly's generalization to other datasets, the potential limitations of using GRA for feature selection, and the computational efficiency or scalability, which are critical for real-time industrial systems.Multivariate time series data2022
[96]GAN+LSTMPredictionFGANomaly method effectively filters anomalous samples before training, improving AD accuracy and robustness by precisely capturing normal data distribution and dynamically adjusting generator focus.The method lacks effective fusion of information across different dimensions in multivariate time series, which limits its ability to fully capture complex correlations.Multivariate time series data2022
[93]GANReconstructionImproves the quality of the generated anomaly images and generates anomalies with different shapes, rotation angles, spatial locations, and numbers in a controllable manner.The images generated are not very sensitive to the change of light.Image data2023
[89]GANReconstructionImproves training efficiency and stability in dynamic graph AD while avoiding the expensive optimization process typical of traditional graph generative adversarial networks.The detection accuracy on the UCI Message dataset is lower than that of TADDY.Dynamic graph data2023
[92]GAN+TransformerReconstructionIt can effectively detect anomalies in long sequences, mitigates overfitting, and incorporates contrastive loss into the discriminator to fine-tune the GAN, ensuring strong generalization ability.It may struggle with irregularly sampled data or datasets with many missing values, requires careful tuning of several hyperparameters, and demands significant computational resources, posing challenges for real-time processing on limited-capacity devices.Multivariate time series data2024
", + "image_path": "0a1dd839ad5ce27d8ca8ac3dde9e2c28a00c69f076a04ee08e2228fd8db6d21b.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 483, + 301, + 746 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 483, + 301, + 746 + ], + "spans": [ + { + "bbox": [ + 45, + 483, + 301, + 746 + ], + "type": "text", + "content": "[105]. Unlike traditional VAEs, which use KL divergence to match the latent space distribution to a prior, AAEs achieve this through the use of GANs. Specifically, AAEs employ a GAN's discriminator to evaluate the latent variable distribution produced by the encoder and use adversarial training to align it with the desired prior distribution, providing more flexible control over the quality of the generated data. Wu et al. [87] propose the Fault-Attention Generative Probabilistic Adversarial Autoencoder (FGPAA) for machine AD, utilizing an end-to-end AAE with double discriminators to extract relevant features and ensure accurate equipment health monitoring through a fault-attention probability distribution. Idrissi et al. [51] apply AAE and FL in the field of network intrusion detection, effectively ensuring AD performance while safeguarding client privacy. Experimental results demonstrate that the proposed model outperforms AE, VAE, and AAE on various network traffic datasets, achieving high performance across different metrics. Su et al. [106] propose two contamination-immune BiGAN models, integrating elements of VAE and BiGAN to create a new AAE-based framework that effectively detects anomalies by learning the probability distribution of normal samples from contaminated datasets, significantly outperform" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 307, + 483, + 564, + 613 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 483, + 564, + 613 + ], + "spans": [ + { + "bbox": [ + 307, + 483, + 564, + 613 + ], + "type": "text", + "content": "ing state-of-the-art methods in scenarios where training data is impure. Similar to the aforementioned AAE models, Du et al. use GANs to purify the original dataset, generating synthetic \"normal\" data to improve outlier detection accuracy. Continuing the advancements in AAE-based models, Yu et al. [107] introduce an Adversarial Contrastive Autoencoder (ACAE) for Multivariate Time Series (MTS) AD, which enhances feature representation through adversarial training and contrastive learning, demonstrating superior performance across multiple real-world datasets, further extending the application of AAE-based methods in robust AD." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 308, + 616, + 564, + 699 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 616, + 564, + 699 + ], + "spans": [ + { + "bbox": [ + 308, + 616, + 564, + 699 + ], + "type": "text", + "content": "3) Diffusion model-Based for Anomaly Detection: Diffusion models are a type of generative model that operate through two key phases: a fixed forward diffusion process and a learnable reverse diffusion process [108]. 
Mathematically, the forward process involves progressively adding Gaussian noise to the data " + }, + { + "bbox": [ + 308, + 616, + 564, + 699 + ], + "type": "inline_equation", + "content": "x_0" + }, + { + "bbox": [ + 308, + 616, + 564, + 699 + ], + "type": "text", + "content": ", transforming it into pure noise " + }, + { + "bbox": [ + 308, + 616, + 564, + 699 + ], + "type": "inline_equation", + "content": "x_T" + }, + { + "bbox": [ + 308, + 616, + 564, + 699 + ], + "type": "text", + "content": " over " + }, + { + "bbox": [ + 308, + 616, + 564, + 699 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 308, + 616, + 564, + 699 + ], + "type": "text", + "content": " steps. This process can be described as:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 351, + 704, + 564, + 719 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 351, + 704, + 564, + 719 + ], + "spans": [ + { + "bbox": [ + 351, + 704, + 564, + 719 + ], + "type": "interline_equation", + "content": "q \\left(x _ {t} \\mid x _ {t - 1}\\right) = \\mathcal {N} \\left(x _ {t}; \\sqrt {1 - \\beta_ {t}} x _ {t - 1}, \\beta_ {t} I\\right), \\tag {4}", + "image_path": "599f2b99fd60660c0b7aed4125907dd6087f566ef9745745c026c30544caff37.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 308, + 724, + 564, + 749 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 724, + 564, + 749 + ], + "spans": [ + { + "bbox": [ + 308, + 724, + 564, + 749 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 308, + 724, + 564, + 749 + ], + "type": "inline_equation", + "content": "q(x_{t}|x_{t - 1})" + }, + { + "bbox": [ + 308, + 724, + 564, + 749 + ], + "type": "text", + "content": " is the conditional probability distribution of " + }, + { + "bbox": [ + 308, + 724, + 564, + 749 + ], + "type": "inline_equation", + "content": "x_{t}" + }, + { + "bbox": [ + 308, + 724, + 564, + 749 + ], + "type": "text", + "content": " given " + }, + { + "bbox": [ + 308, + 724, + 564, + 749 + ], + "type": "inline_equation", + "content": "x_{t - 1}" + }, + { + "bbox": [ + 308, + 724, + 564, + 749 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 308, + 724, + 564, + 749 + ], + "type": "inline_equation", + "content": "\\beta_{t}" + }, + { + "bbox": [ + 308, + 724, + 564, + 749 + ], + "type": "text", + "content": " is the noise variance at step " + }, + { + "bbox": [ + 308, + 724, + 564, + 749 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 308, + 724, + 564, + 749 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 308, + 724, + 564, + 749 + ], + "type": "inline_equation", + "content": "x_{t}" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 47, + 23, + 261, + 32 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 23, + 261, + 32 + ], + "spans": [ + { + "bbox": [ + 47, + 23, + 261, + 32 + ], + "type": "text", + "content": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. 
X, XXX 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 50, + 81, + 560, + 479 + ], + "blocks": [ + { + "bbox": [ + 204, + 56, + 406, + 74 + ], + "lines": [ + { + "bbox": [ + 204, + 56, + 406, + 74 + ], + "spans": [ + { + "bbox": [ + 204, + 56, + 406, + 74 + ], + "type": "text", + "content": "TABLE III AUTOENCODER-BASED MODELS IN ANOMALY DETECTION" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 50, + 81, + 560, + 479 + ], + "lines": [ + { + "bbox": [ + 50, + 81, + 560, + 479 + ], + "spans": [ + { + "bbox": [ + 50, + 81, + 560, + 479 + ], + "type": "table", + "html": "
<table><thead><tr><th>Paper</th><th>Technique</th><th>Approach Type</th><th>Strength</th><th>Weakness</th><th>Data Type</th><th>Year</th></tr></thead><tbody>
<tr><td>[104]</td><td>VAE-based (VAE+LSTM)</td><td>Reconstruction</td><td>Effectively addresses imbalanced and high-dimensional challenges in industrial big data.</td><td>Falls short in achieving the highest AUC and F1 scores compared to other methods.</td><td>Industrial big data</td><td>2020</td></tr>
<tr><td>[87]</td><td>AAE-based</td><td>Reconstruction</td><td>FGPAA reduces information loss during feature extraction and constructs fault attention anomaly indicators using low-dimensional feature probability and reconstruction error.</td><td>Runtime is approximately five times longer than SOM.</td><td>Rotating machine fault simulator data</td><td>2020</td></tr>
<tr><td>[98]</td><td>AE-based (AE+CNN)</td><td>Reconstruction</td><td>The Auto-AD method enables fully autonomous hyperspectral AD, automatically separating anomalies based on reconstruction errors without the need for manual tuning or additional processing.</td><td>Lower AUC score compared to the GRX method on the Honghu dataset.</td><td>Hyperspectral data</td><td>2021</td></tr>
<tr><td>[99]</td><td>AE-based (AE+CNN)</td><td>Reconstruction</td><td>MSNet offers an effective solution to handle multiscale anomaly shapes, providing greater flexibility without the need for threshold fine-tuning.</td><td>Multiple convolutional encoder-decoder layers and enhanced training increase computational cost and training time.</td><td>Hyperspectral data</td><td>2021</td></tr>
<tr><td>[101]</td><td>VAE-based (VAE+Transformer)</td><td>Reconstruction</td><td>SSR-AE leverages self-supervised learning to enhance normal data reconstruction and hinder abnormal data, optimizing mutual information for effective transformation and image reconstruction.</td><td>Struggles with transformations, heavily relying on their effectiveness for AD.</td><td>Image data</td><td>2021</td></tr>
<tr><td>[97]</td><td>AE-based</td><td>Reconstruction</td><td>Maintains geometric structure and local spatial coherence of hyperspectral images (HSI), reducing search space and execution time per pixel.</td><td>High execution time for constructing the SuperGraph matrix with large datasets.</td><td>Hyperspectral data</td><td>2021</td></tr>
<tr><td>[51]</td><td>AAE-based (AAE+Federated learning)</td><td>Reconstruction</td><td>Fed-ANIDS demonstrates strong generalization, outperforms GAN-based models, and ensures privacy protection through federated learning.</td><td>Computational overhead due to the federated learning framework, increasing training complexity and latency.</td><td>Cybersecurity data</td><td>2023</td></tr>
<tr><td>[100]</td><td>AE-based</td><td>Reconstruction</td><td>Applicable for time series AD under data contamination.</td><td>Assumes normal samples follow a Gaussian distribution, limiting applicability, and has higher computational complexity.</td><td>Time series data</td><td>2024</td></tr>
<tr><td>[106]</td><td>AAE-based</td><td>Reconstruction</td><td>Learns the probability distribution of normal samples from contaminated datasets, achieving convergence and outperforming baseline models.</td><td>Relies on the assumption that the contamination ratio is known, which may not always be accurate in practice.</td><td>Medical image data</td><td>2024</td></tr>
<tr><td>[86]</td><td>AAE-based</td><td>Reconstruction</td><td>Generates a clean dataset from contaminated data for AD, with linear scalability for larger datasets.</td><td>Struggles with detection accuracy in datasets with multiple distribution patterns.</td><td>Tabular data</td><td>2024</td></tr>
<tr><td>[107]</td><td>AAE-based</td><td>Reconstruction</td><td>Excels in learning high-level semantic features and capturing normal patterns of MTS with contrastive learning constraints, ensuring stability across parameter settings.</td><td>Performance on all metrics for SMAP and PSM datasets is lower than baseline methods.</td><td>Multivariate time series data</td><td>2024</td></tr>
</tbody></table>
", + "image_path": "11c660ad378396e7d5a12ad6c18a5c71bc6887df1411ff0efc0d981bd187e2f7.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 501, + 301, + 537 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 501, + 301, + 537 + ], + "spans": [ + { + "bbox": [ + 45, + 501, + 301, + 537 + ], + "type": "text", + "content": "represents the noisy data at step " + }, + { + "bbox": [ + 45, + 501, + 301, + 537 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 45, + 501, + 301, + 537 + ], + "type": "text", + "content": ". As " + }, + { + "bbox": [ + 45, + 501, + 301, + 537 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 45, + 501, + 301, + 537 + ], + "type": "text", + "content": " increases, the data becomes more corrupted by noise until it reaches a state of pure Gaussian noise at step " + }, + { + "bbox": [ + 45, + 501, + 301, + 537 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 45, + 501, + 301, + 537 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 45, + 543, + 300, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 543, + 300, + 616 + ], + "spans": [ + { + "bbox": [ + 45, + 543, + 300, + 616 + ], + "type": "text", + "content": "The reverse process learns to gradually denoise the data, removing the added noise step by step. The model learns a parameterized distribution " + }, + { + "bbox": [ + 45, + 543, + 300, + 616 + ], + "type": "inline_equation", + "content": "p_{\\theta}(x_{t - 1}|x_t)" + }, + { + "bbox": [ + 45, + 543, + 300, + 616 + ], + "type": "text", + "content": " to reverse the noise addition process, reconstructing the original data from the noisy data. This reverse process is trained to minimize the variational bound on the data likelihood, expressed as:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 79, + 621, + 299, + 635 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 621, + 299, + 635 + ], + "spans": [ + { + "bbox": [ + 79, + 621, + 299, + 635 + ], + "type": "interline_equation", + "content": "L = \\mathbb {E} _ {q} \\left[ D _ {K L} \\left(q \\left(x _ {t - 1} \\mid x _ {t}, x _ {0}\\right) \\mid p _ {\\theta} \\left(x _ {t - 1} \\mid x _ {t}\\right)\\right) \\right]. \\tag {5}", + "image_path": "ad8391976e6f89cfbc05f96fe208d27610a19c9217688d1a8d523a50dd83d456.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 45, + 640, + 301, + 750 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 640, + 301, + 750 + ], + "spans": [ + { + "bbox": [ + 45, + 640, + 301, + 750 + ], + "type": "text", + "content": "By progressively removing noise, diffusion models generate high-fidelity samples, first capturing coarse structures and then refining details in each step. In the context of AD, diffusion models are trained on normal data to learn the underlying data distribution through an iterative noise-removal process. Similar to other reconstruction-based methods, anomalies can be identified by evaluating the reconstruction error, where a higher error indicates that the data deviates from the learned normal patterns." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 307, + 500, + 564, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 500, + 564, + 597 + ], + "spans": [ + { + "bbox": [ + 307, + 500, + 564, + 597 + ], + "type": "text", + "content": "Diffusion models stand out from GANs and VAEs in several key ways. They avoid common issues such as mode collapse in GANs, where only a subset of the data distribution is captured, leading to reduced diversity. Diffusion models also overcome the blurriness associated with VAEs, which often results from pixel-based loss and a smaller latent space. By iteratively denoising data, diffusion models maintain both high fidelity and diversity in their outputs." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 307, + 605, + 564, + 750 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 605, + 564, + 750 + ], + "spans": [ + { + "bbox": [ + 307, + 605, + 564, + 750 + ], + "type": "text", + "content": "While diffusion models are slower in generating samples due to their iterative nature, their ability to accurately reconstruct data and cover the full range of the training dataset makes them particularly well-suited for AD [109]. In AD, where precision is critical, diffusion models excel by generating detailed and high-quality samples, enabling them to identify subtle deviations from normal patterns with greater accuracy than other generative models. Several works have leveraged the advantages of diffusion models in ADn. For example, Zhang et al. [110] utilize the high-quality and diverse image generation capabilities of diffusion models to enhance reconstruction quality in DiffAD, addressing the limitations of" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 47, + 23, + 261, + 33 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 23, + 261, + 33 + ], + "spans": [ + { + "bbox": [ + 47, + 23, + 261, + 33 + ], + "type": "text", + "content": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. X, XXX 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 45, + 55, + 301, + 343 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 55, + 301, + 343 + ], + "spans": [ + { + "bbox": [ + 45, + 55, + 301, + 343 + ], + "type": "text", + "content": "traditional methods by introducing noisy condition embedding and interpolated channels. Similarly, Li et al. [111] apply a diffusion model to reconstruct normal data distributions and integrate an auxiliary learning module with pretext tasks to better distinguish between normal and abnormal data. Expanding on these ideas, Zeng et al. [112] improve denoising diffusion probabilistic models (DDPMs) for radio AD by incorporating an AE to learn the distribution of normal signals and their power spectral density (PSD), using reconstruction error to identify anomalies. Li et al. [113] present a Controlled Graph Neural Network (ConGNN) approach based on DDPMs to address the challenge of limited labeled data. Li et al. 
[114] further explore diffusion models in vehicle trajectory AD, employing decoupled Transformer-based encoders to capture temporal dependencies and spatial interactions among vehicles, significantly improving AUC and F1 scores on real-world and synthetic datasets. Similarly, Pei et al. [115] propose the two-stage diffusion model (TSDM) to mitigate the influence of anomalies in smart grids, where the first stage is a diffusion-based AD component. In multi-class AD, He et al. [116] propose DiAD, a framework that enhances reconstruction accuracy through a combination of a semantic-guided network, spatial-aware feature fusion, and a pre-trained feature extractor to generate anomaly maps." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 45, + 352, + 301, + 375 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 352, + 301, + 375 + ], + "spans": [ + { + "bbox": [ + 45, + 352, + 301, + 375 + ], + "type": "text", + "content": "B. Deep learning methods for Anomaly Detection based on Prediction" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 378, + 302, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 378, + 302, + 700 + ], + "spans": [ + { + "bbox": [ + 45, + 378, + 302, + 700 + ], + "type": "text", + "content": "Prediction-based AD methods operate by forecasting future values or estimating missing attributes and comparing these predictions to the actual observed values. Significant deviations indicate potential anomalies, as the data departs from the learned normal patterns. These methods are versatile: by learning the underlying structure of the data, whether temporal dependencies or more general interactions between variables, they can predict expected outcomes across a wide range of data types and flag deviations from those expectations as anomalies. In this section, we explore three main approaches for prediction-based AD: Recurrent Neural Networks (RNNs), attention mechanisms, and Graph Neural Networks (GNNs), all of which have demonstrated efficacy in capturing intricate patterns and relationships within data, whether spatial, temporal, or graph-based, to identify anomalies." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 45, + 700, + 301, + 750 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 700, + 301, + 750 + ], + "spans": [ + { + "bbox": [ + 45, + 700, + 301, + 750 + ], + "type": "text", + "content": "1) RNN-based Anomaly Detection: Recurrent Neural Networks (RNNs) [117] are a special type of neural network designed to process sequential data by capturing dependencies between elements in a sequence. 
Unlike standard neural" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 307, + 54, + 564, + 150 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 54, + 564, + 150 + ], + "spans": [ + { + "bbox": [ + 307, + 54, + 564, + 150 + ], + "type": "text", + "content": "networks, RNNs incorporate a state vector " + }, + { + "bbox": [ + 307, + 54, + 564, + 150 + ], + "type": "inline_equation", + "content": "s_t" + }, + { + "bbox": [ + 307, + 54, + 564, + 150 + ], + "type": "text", + "content": " in the hidden layer, allowing them to retain information from previous steps and model sequential patterns. This capability makes them effective in various applications where data has an inherent order, such as event logs, system monitoring, and structured sequences in cybersecurity or industrial processes. For an input " + }, + { + "bbox": [ + 307, + 54, + 564, + 150 + ], + "type": "inline_equation", + "content": "x_t" + }, + { + "bbox": [ + 307, + 54, + 564, + 150 + ], + "type": "text", + "content": " at time " + }, + { + "bbox": [ + 307, + 54, + 564, + 150 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 307, + 54, + 564, + 150 + ], + "type": "text", + "content": ", the update of the state value " + }, + { + "bbox": [ + 307, + 54, + 564, + 150 + ], + "type": "inline_equation", + "content": "s_t" + }, + { + "bbox": [ + 307, + 54, + 564, + 150 + ], + "type": "text", + "content": " and hidden layer output " + }, + { + "bbox": [ + 307, + 54, + 564, + 150 + ], + "type": "inline_equation", + "content": "h_t" + }, + { + "bbox": [ + 307, + 54, + 564, + 150 + ], + "type": "text", + "content": " in RNNs can be represented as" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 367, + 155, + 563, + 174 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 367, + 155, + 563, + 174 + ], + "spans": [ + { + "bbox": [ + 367, + 155, + 563, + 174 + ], + "type": "interline_equation", + "content": "\\boldsymbol {s} _ {t} = \\sigma \\left(\\boldsymbol {W} ^ {x} \\boldsymbol {x} _ {t} + \\boldsymbol {W} ^ {s} \\boldsymbol {s} _ {t - 1} + \\boldsymbol {b} ^ {s}\\right) \\tag {6}", + "image_path": "0c83e3768363b3393233d77288befd0f35cb7bcb4b9c89a9f8ee4f51a0f96f26.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 367, + 171, + 484, + 184 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 367, + 171, + 484, + 184 + ], + "spans": [ + { + "bbox": [ + 367, + 171, + 484, + 184 + ], + "type": "interline_equation", + "content": "\\boldsymbol {h} _ {t} = \\operatorname {s o f t m a x} \\left(\\boldsymbol {W} ^ {h} \\boldsymbol {s} _ {t} + \\boldsymbol {b} ^ {h}\\right),", + "image_path": "d588d6f350589007811fb9b7c0e386db40420189cc2b21050b623b552529bf5c.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 307, + 189, + 564, + 249 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 189, + 564, + 249 + ], + "spans": [ + { + "bbox": [ + 307, + 189, + 564, + 249 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 307, + 189, + 564, + 249 + ], + "type": "inline_equation", + "content": "\\sigma(\\cdot)" + }, + { + "bbox": [ + 307, + 189, + 564, + 249 + ], + "type": "text", + "content": " is the sigmoid activation function, " + }, + { + "bbox": [ + 307, + 189, + 564, + 249 + ], + "type": "inline_equation", + "content": "W^x" + }, + { + "bbox": [ + 307, + 189, + 564, + 249 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 307, + 189, + 564, + 249 + ], + "type": 
"inline_equation", + "content": "W^s" + }, + { + "bbox": [ + 307, + 189, + 564, + 249 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 307, + 189, + 564, + 249 + ], + "type": "inline_equation", + "content": "W^h" + }, + { + "bbox": [ + 307, + 189, + 564, + 249 + ], + "type": "text", + "content": " represent the network weights, and " + }, + { + "bbox": [ + 307, + 189, + 564, + 249 + ], + "type": "inline_equation", + "content": "b" + }, + { + "bbox": [ + 307, + 189, + 564, + 249 + ], + "type": "text", + "content": " is the network biases. By maintaining a recurrent state, RNNs can effectively capture dependencies across different steps within a sequence, making them well-suited for tasks involving ordered data." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 307, + 251, + 564, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 251, + 564, + 335 + ], + "spans": [ + { + "bbox": [ + 307, + 251, + 564, + 335 + ], + "type": "text", + "content": "However, RNNs face the problem of exploding or vanishing gradients when dealing with long sequences. Long Short-Term Memory networks (LSTMs) [118], a specialized type of RNN, were introduced to address these issues. Specifically, LSTMs replace the hidden layer of RNNs with an LSTM block consisting of input, output, and forget gates. The inference process of LSTM at time " + }, + { + "bbox": [ + 307, + 251, + 564, + 335 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 307, + 251, + 564, + 335 + ], + "type": "text", + "content": " is given by" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 357, + 339, + 507, + 354 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 357, + 339, + 507, + 354 + ], + "spans": [ + { + "bbox": [ + 357, + 339, + 507, + 354 + ], + "type": "interline_equation", + "content": "\\boldsymbol {f} _ {t} = \\sigma \\left(\\boldsymbol {W} ^ {x f} \\boldsymbol {x} _ {t} + \\boldsymbol {W} ^ {h f} \\boldsymbol {h} _ {t - 1} + \\boldsymbol {b} ^ {f}\\right)", + "image_path": "3aab2761cdd89b226e195041dbb9899bba4c9001b81bef2aec2bb5ff3066b14b.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 358, + 355, + 502, + 370 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 358, + 355, + 502, + 370 + ], + "spans": [ + { + "bbox": [ + 358, + 355, + 502, + 370 + ], + "type": "interline_equation", + "content": "\\boldsymbol {i} _ {t} = \\sigma \\left(\\boldsymbol {W} ^ {x i} \\boldsymbol {x} _ {t} + \\boldsymbol {W} ^ {h i} \\boldsymbol {h} _ {t - 1} + \\boldsymbol {b} ^ {i}\\right)", + "image_path": "3be6049775f5c5191948d715f5086fae3a8775358da3241c119448a1483ffcc1.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 358, + 370, + 563, + 390 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 358, + 370, + 563, + 390 + ], + "spans": [ + { + "bbox": [ + 358, + 370, + 563, + 390 + ], + "type": "interline_equation", + "content": "\\tilde {\\boldsymbol {c}} _ {t} = \\tanh \\left(\\boldsymbol {W} ^ {x \\tilde {c}} \\boldsymbol {x} _ {t} + \\boldsymbol {W} ^ {h \\tilde {c}} \\boldsymbol {h} _ {t - 1} + \\boldsymbol {b} ^ {\\tilde {c}}\\right) \\tag {7}", + "image_path": "7308349784488dbf457c989800ee8b1dfb8275a27619297e48677fe456758a50.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 358, + 388, + 437, + 399 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 358, + 388, + 437, + 399 + ], + "spans": [ + { + "bbox": [ + 358, + 388, + 
437, + 399 + ], + "type": "interline_equation", + "content": "\\boldsymbol {c} _ {t} = \\boldsymbol {f} _ {t} \\boldsymbol {c} _ {t - 1} + \\boldsymbol {i} _ {t} \\tilde {\\boldsymbol {c}} _ {t}", + "image_path": "7a3d189f7e4d2c78f15db015f6396740178b4f4fadb8853fe3a70a7dbd4e03f5.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 357, + 402, + 505, + 416 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 357, + 402, + 505, + 416 + ], + "spans": [ + { + "bbox": [ + 357, + 402, + 505, + 416 + ], + "type": "interline_equation", + "content": "\\boldsymbol {o} _ {t} = \\sigma \\left(\\boldsymbol {W} ^ {x o} \\boldsymbol {x} _ {t} + \\boldsymbol {W} ^ {h o} \\boldsymbol {h} _ {t - 1} + \\boldsymbol {b} ^ {o}\\right)", + "image_path": "9ab5864bb5a00f300f042438425cf0cfc8335ed7c480d2804fa41efc69b98043.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 356, + 418, + 432, + 430 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 356, + 418, + 432, + 430 + ], + "spans": [ + { + "bbox": [ + 356, + 418, + 432, + 430 + ], + "type": "interline_equation", + "content": "\\boldsymbol {h} _ {t} = \\boldsymbol {o} _ {t} \\tanh \\left(\\boldsymbol {c} _ {t}\\right),", + "image_path": "2a7211cd592f142299cc19606c6bbd41196006c6e92189f745f7644a0f048d6f.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 434, + 564, + 579 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 434, + 564, + 579 + ], + "spans": [ + { + "bbox": [ + 307, + 434, + 564, + 579 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 307, + 434, + 564, + 579 + ], + "type": "inline_equation", + "content": "f_{t}" + }, + { + "bbox": [ + 307, + 434, + 564, + 579 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 307, + 434, + 564, + 579 + ], + "type": "inline_equation", + "content": "i_{t}" + }, + { + "bbox": [ + 307, + 434, + 564, + 579 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 307, + 434, + 564, + 579 + ], + "type": "inline_equation", + "content": "o_{t}" + }, + { + "bbox": [ + 307, + 434, + 564, + 579 + ], + "type": "text", + "content": " are the forget, input and output gate weights, respectively. " + }, + { + "bbox": [ + 307, + 434, + 564, + 579 + ], + "type": "inline_equation", + "content": "c_{t}" + }, + { + "bbox": [ + 307, + 434, + 564, + 579 + ], + "type": "text", + "content": " represents the cell state of LSTM, and " + }, + { + "bbox": [ + 307, + 434, + 564, + 579 + ], + "type": "inline_equation", + "content": "\\tanh(\\cdot)" + }, + { + "bbox": [ + 307, + 434, + 564, + 579 + ], + "type": "text", + "content": " is the hyperbolic tangent activation function. By controlling the weights of the forget, input, and output gates, LSTM determines the importance of historical time series information and the current input on the current output, thus effectively mitigating issues of gradient vanishing and allowing robust modeling of complex sequences. Reference [119] provides comprehensive evidence of LSTM's effectiveness in AD across various technical systems, demonstrating its superiority in learning complex temporal behaviors and accurately identifying anomalies." 
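As a minimal sketch of Eq. (7), one LSTM step can be written directly in NumPy; the dictionary layout of the gate parameters is an assumption made for readability, not a standard API.

```python
import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def lstm_step(x_t, h_prev, c_prev, W, b):
    """One LSTM step per Eq. (7). W[k] = (W_x, W_h) and b[k] hold the
    parameters of gate k, for k in {'f', 'i', 'c', 'o'} (assumed layout)."""
    f_t = sigmoid(W['f'][0] @ x_t + W['f'][1] @ h_prev + b['f'])      # forget gate
    i_t = sigmoid(W['i'][0] @ x_t + W['i'][1] @ h_prev + b['i'])      # input gate
    c_tilde = np.tanh(W['c'][0] @ x_t + W['c'][1] @ h_prev + b['c'])  # candidate cell
    c_t = f_t * c_prev + i_t * c_tilde                                # new cell state
    o_t = sigmoid(W['o'][0] @ x_t + W['o'][1] @ h_prev + b['o'])      # output gate
    h_t = o_t * np.tanh(c_t)                                          # new hidden state
    return h_t, c_t
```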
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 580, + 564, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 580, + 564, + 748 + ], + "spans": [ + { + "bbox": [ + 307, + 580, + 564, + 748 + ], + "type": "text", + "content": "The Gated Recurrent Unit (GRU) [120] is a simplified version of LSTM that only includes an update gate and a reset gate and uses the hidden state alone to represent both short-term and long-term information. These different types of RNNs can be used in prediction-based AD tasks, with the specific detection and inference method illustrated in Fig. 5. RNNs, LSTMs, and GRUs take time series data from " + }, + { + "bbox": [ + 307, + 580, + 564, + 748 + ], + "type": "inline_equation", + "content": "t - w" + }, + { + "bbox": [ + 307, + 580, + 564, + 748 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 307, + 580, + 564, + 748 + ], + "type": "inline_equation", + "content": "t - 1" + }, + { + "bbox": [ + 307, + 580, + 564, + 748 + ], + "type": "text", + "content": " as input, and their pre-trained neural networks use these temporally ordered data to predict the single-step or multi-step future values of the univariate or multivariate time series. If the difference between the actual and predicted values is below a threshold, no anomaly is detected; if the difference exceeds the threshold, an anomaly is detected and the spatiotemporal location of the anomaly is identified." + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 47, + 23, + 261, + 33 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 23, + 261, + 33 + ], + "spans": [ + { + "bbox": [ + 47, + 23, + 261, + 33 + ], + "type": "text", + "content": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. X, XXX 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 59, + 82, + 553, + 335 + ], + "blocks": [ + { + "bbox": [ + 211, + 56, + 400, + 75 + ], + "lines": [ + { + "bbox": [ + 211, + 56, + 400, + 75 + ], + "spans": [ + { + "bbox": [ + 211, + 56, + 400, + 75 + ], + "type": "text", + "content": "TABLE IV DIFFUSION-BASED MODELS IN ANOMALY DETECTION" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 59, + 82, + 553, + 335 + ], + "lines": [ + { + "bbox": [ + 59, + 82, + 553, + 335 + ], + "spans": [ + { + "bbox": [ + 59, + 82, + 553, + 335 + ], + "type": "table", + "html": "
<table><thead><tr><th>Paper</th><th>Technique</th><th>Approach Type</th><th>Strength</th><th>Weakness</th><th>Data Type</th><th>Year</th></tr></thead><tbody>
<tr><td>[110]</td><td>Diffusion</td><td>Reconstruction</td><td>The latent diffusion model (LDM) used in this method achieves state-of-the-art performance in surface AD by generating high-quality, semantically correct reconstructions, effectively avoiding overfitting to anomalies.</td><td>It is less suitable for real-time applications or environments with limited computational resources.</td><td>Image data</td><td>2023</td></tr>
<tr><td>[112]</td><td>Diffusion+VAE</td><td>Reconstruction</td><td>The AE-DDPMs algorithm effectively improves stability and reduces computational costs in radio AD, outperforming GAN-based methods in complex electromagnetic environments.</td><td>The anomalies in the experimental data are artificially generated, rather than originating from real-world conditions, which may limit the model's applicability to genuine, real-world scenarios.</td><td>Radio signal data</td><td>2023</td></tr>
<tr><td>[113]</td><td>Diffusion+GNN</td><td>Prediction</td><td>ConGNN effectively addresses the issue of limited labeled data by generating augmented graph data using a graph-specific diffusion model.</td><td>The reliance on graph-specific augmentation might not generalize well to other types of data, potentially limiting its applicability beyond graph-based AD.</td><td>Image data</td><td>2023</td></tr>
<tr><td>[111]</td><td>Diffusion+VAE</td><td>Hybrid</td><td>SDAD effectively enhances AD by combining self-supervised learning for discriminative data representation with denoising diffusion.</td><td>The generation of pseudo anomalies relies solely on standard Gaussian sampling, which may not fully capture the complexity of real anomalies, limiting the model's ability to accurately simulate genuine abnormal data.</td><td>Structured data</td><td>2024</td></tr>
<tr><td>[114]</td><td>Diffusion+Transformer</td><td>Hybrid</td><td>DiffTAD effectively models temporal dependencies and spatial interactions in vehicle trajectories through diffusion models, significantly improving AD accuracy and robustness to noise.</td><td>The anomalies are primarily evaluated on synthetic datasets, which may not fully reflect the complexity and diversity of real-world trajectory data.</td><td>Vehicle trajectory data</td><td>2024</td></tr>
</tbody></table>
", + "image_path": "99343b86e303b16ed72ac102346e1a4f94400e20ddc1bebcd295923286e32817.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 47, + 338, + 564, + 439 + ], + "blocks": [ + { + "bbox": [ + 47, + 338, + 564, + 439 + ], + "lines": [ + { + "bbox": [ + 47, + 338, + 564, + 439 + ], + "spans": [ + { + "bbox": [ + 47, + 338, + 564, + 439 + ], + "type": "image", + "image_path": "776e31311c1c4c6521d678b5659cbccf97515cff7d507f59b7a11d83bd281e30.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 45, + 447, + 479, + 459 + ], + "lines": [ + { + "bbox": [ + 45, + 447, + 479, + 459 + ], + "spans": [ + { + "bbox": [ + 45, + 447, + 479, + 459 + ], + "type": "text", + "content": "Fig. 5. RNN-based application example for time series data anomaly detection: (a) RNN-based, (b) LSTM-based, (c) GRU-based." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 45, + 472, + 302, + 749 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 472, + 302, + 749 + ], + "spans": [ + { + "bbox": [ + 45, + 472, + 302, + 749 + ], + "type": "text", + "content": "Current RNN-based AD primarily focuses on improving RNN algorithms tailored to AD tasks and integrating RNN with other methods for AD. The method in [121] employs a pruning algorithm to reduce the number of false data points, enabling the LSTM-based AD approach to better address the challenges posed by the extremely uneven distribution of railway traffic data. LSTM combined with AE [122], VAE [123], and Singular Value Decomposition (SVD) [124] has also been used to identify anomalies in Controller Area Networks (CANs) [125], electrocardiograms, and Internet monitoring data. GANs based on adversarial learning have also been integrated into the time series learning of LSTM, achieving very high performance in scenarios with few features [95], extremely imbalanced training sets, and noise interference [96]. CNN is also integrated into LSTM in a serial [126], parallel [127], or as a foundational layer [128] to better extract the spatiotemporal correlations of multidimensional time series, thereby enhancing the performance of AD. GRUs, compared to LSTMs, have a more streamlined architecture, resulting in lower computational complexity during training and execution of AD tasks, and they tend to perform better on certain less complex sequential data. For instance, GRUs enhance interpretability by uncovering latent correlations in" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 307, + 472, + 563, + 531 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 472, + 563, + 531 + ], + "spans": [ + { + "bbox": [ + 307, + 472, + 563, + 531 + ], + "type": "text", + "content": "multivariate time series data from industrial control system sensors [129]. Similar to LSTMs, GRUs can also be combined with AEs [130] or VAEs [25] in an encoder-decoder architecture to mitigate the effects of noise and anomalies, thereby improving the accuracy of AD." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 307, + 533, + 564, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 533, + 564, + 700 + ], + "spans": [ + { + "bbox": [ + 307, + 533, + 564, + 700 + ], + "type": "text", + "content": "2) Attention-based Anomaly Detection: The attention mechanism was initially applied in machine translation [131], with its core idea being to enable the neural network to focus on the relevant parts of the input values. While attention-based methods have shown great promise in time series AD, their applications are not limited to temporal data. These methods can effectively capture dependencies in various types of data, including spatial, spatiotemporal, and multimodal datasets. This flexibility broadens their use cases across different AD tasks. Compared to RNN-based approaches, they are better suited for long or complex sequences because attention can compute dependencies between all positions in the sequence simultaneously, while RNNs process sequences sequentially, step by step." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 308, + 700, + 564, + 750 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 700, + 564, + 750 + ], + "spans": [ + { + "bbox": [ + 308, + 700, + 564, + 750 + ], + "type": "text", + "content": "Figure 6 illustrates a typical attention-based model for AD. Among attention-based methods, the self-attention mechanism is particularly effective in capturing global dependencies across various types of sequential data, including temporal," + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 47, + 23, + 261, + 33 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 23, + 261, + 33 + ], + "spans": [ + { + "bbox": [ + 47, + 23, + 261, + 33 + ], + "type": "text", + "content": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. X, XXX 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 73, + 52, + 531, + 167 + ], + "blocks": [ + { + "bbox": [ + 73, + 52, + 531, + 167 + ], + "lines": [ + { + "bbox": [ + 73, + 52, + 531, + 167 + ], + "spans": [ + { + "bbox": [ + 73, + 52, + 531, + 167 + ], + "type": "image", + "image_path": "019b206e929dfe093eeec77a1d42acc96182676e7e7c79b8a51a4f5bc4ca29c2.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 45, + 175, + 565, + 212 + ], + "lines": [ + { + "bbox": [ + 45, + 175, + 565, + 212 + ], + "spans": [ + { + "bbox": [ + 45, + 175, + 565, + 212 + ], + "type": "text", + "content": "Fig. 6. Attention-based model for anomaly detection. The model first embeds sequential data using input embedding and positional encoding to preserve temporal dependencies. The multi-head attention mechanism captures long-range dependencies by processing interactions between all time steps. The feedforward layer then refines feature representations, and a dense interpolation layer enhances anomaly-related features before passing them to a fully connected network (FNN) for final AD." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 45, + 226, + 301, + 287 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 226, + 301, + 287 + ], + "spans": [ + { + "bbox": [ + 45, + 226, + 301, + 287 + ], + "type": "text", + "content": "spatial, and spatiotemporal inputs. For an input dataset " + }, + { + "bbox": [ + 45, + 226, + 301, + 287 + ], + "type": "inline_equation", + "content": "\\mathbf{X} = [x_{1}, x_{2}, \\dots, x_{t}]" + }, + { + "bbox": [ + 45, + 226, + 301, + 287 + ], + "type": "text", + "content": ", the queries, keys, and values are defined as: " + }, + { + "bbox": [ + 45, + 226, + 301, + 287 + ], + "type": "inline_equation", + "content": "Q = X W_{Q}" + }, + { + "bbox": [ + 45, + 226, + 301, + 287 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 45, + 226, + 301, + 287 + ], + "type": "inline_equation", + "content": "K = X W_{K}" + }, + { + "bbox": [ + 45, + 226, + 301, + 287 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 45, + 226, + 301, + 287 + ], + "type": "inline_equation", + "content": "V = X W_{V}" + }, + { + "bbox": [ + 45, + 226, + 301, + 287 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 45, + 226, + 301, + 287 + ], + "type": "inline_equation", + "content": "W_{Q}" + }, + { + "bbox": [ + 45, + 226, + 301, + 287 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 45, + 226, + 301, + 287 + ], + "type": "inline_equation", + "content": "W_{K}" + }, + { + "bbox": [ + 45, + 226, + 301, + 287 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 45, + 226, + 301, + 287 + ], + "type": "inline_equation", + "content": "W_{V}" + }, + { + "bbox": [ + 45, + 226, + 301, + 287 + ], + "type": "text", + "content": " are trainable weight matrices. 
The attention weights are then computed based on " + }, + { + "bbox": [ + 45, + 226, + 301, + 287 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 45, + 226, + 301, + 287 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 45, + 226, + 301, + 287 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 45, + 226, + 301, + 287 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 45, + 226, + 301, + 287 + ], + "type": "inline_equation", + "content": "V" + }, + { + "bbox": [ + 45, + 226, + 301, + 287 + ], + "type": "text", + "content": " as" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 102, + 290, + 300, + 323 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 102, + 290, + 300, + 323 + ], + "spans": [ + { + "bbox": [ + 102, + 290, + 300, + 323 + ], + "type": "interline_equation", + "content": "\\alpha_ {i j} = \\frac {\\exp \\left(\\boldsymbol {Q} _ {i} \\boldsymbol {K} _ {j} ^ {\\top} / \\sqrt {\\boldsymbol {d} _ {k}}\\right)}{\\sum_ {j = 1} ^ {T} \\exp \\left(\\boldsymbol {Q} _ {i} \\boldsymbol {K} _ {j} ^ {\\top} / \\sqrt {\\boldsymbol {d} _ {k}}\\right)}, \\tag {8}", + "image_path": "c152336502e48e3f718bf9788652d8fed7e7c775782ddbca7740d63108b86f8a.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 45, + 326, + 301, + 375 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 326, + 301, + 375 + ], + "spans": [ + { + "bbox": [ + 45, + 326, + 301, + 375 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 45, + 326, + 301, + 375 + ], + "type": "inline_equation", + "content": "d_k" + }, + { + "bbox": [ + 45, + 326, + 301, + 375 + ], + "type": "text", + "content": " is the dimension of the keys. Finally, the output of the self-attention-based neural network, which takes into account the importance of each input value, is given by Attention " + }, + { + "bbox": [ + 45, + 326, + 301, + 375 + ], + "type": "inline_equation", + "content": "(Q, K, V) = \\alpha V" + }, + { + "bbox": [ + 45, + 326, + 301, + 375 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 45, + 378, + 300, + 415 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 378, + 300, + 415 + ], + "spans": [ + { + "bbox": [ + 45, + 378, + 300, + 415 + ], + "type": "text", + "content": "To enable the model to capture features of various patterns, multi-head attention is also well-suited for AD. 
The calculation of multiple heads is expressed as" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 420, + 299, + 445 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 420, + 299, + 445 + ], + "spans": [ + { + "bbox": [ + 55, + 420, + 299, + 445 + ], + "type": "interline_equation", + "content": "\\operatorname {M u l t i h e a d} \\left(\\boldsymbol {Q}, \\boldsymbol {K}, \\boldsymbol {V}\\right) = \\operatorname {C o n c a t} \\left(\\operatorname {h e a d} _ {1}, \\dots , \\operatorname {h e a d} _ {h}\\right) \\boldsymbol {W} _ {O}, \\tag {9}", + "image_path": "82d18c91a06f87e742d55753cfa9e1a8bc54048a8c709dd6c0549a643b62acec.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 45, + 445, + 301, + 588 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 445, + 301, + 588 + ], + "spans": [ + { + "bbox": [ + 45, + 445, + 301, + 588 + ], + "type": "text", + "content": "where each head is computed as " + }, + { + "bbox": [ + 45, + 445, + 301, + 588 + ], + "type": "inline_equation", + "content": "\\mathrm{head}_i =" + }, + { + "bbox": [ + 45, + 445, + 301, + 588 + ], + "type": "text", + "content": " Attention " + }, + { + "bbox": [ + 45, + 445, + 301, + 588 + ], + "type": "inline_equation", + "content": "(\\mathbf{Q}\\mathbf{W}_{Q_i},\\mathbf{K}\\mathbf{W}_{K_i},\\mathbf{V}\\mathbf{W}_{V_i})" + }, + { + "bbox": [ + 45, + 445, + 301, + 588 + ], + "type": "text", + "content": ". Here, " + }, + { + "bbox": [ + 45, + 445, + 301, + 588 + ], + "type": "inline_equation", + "content": "W_{Q_i}" + }, + { + "bbox": [ + 45, + 445, + 301, + 588 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 45, + 445, + 301, + 588 + ], + "type": "inline_equation", + "content": "W_{K_i}" + }, + { + "bbox": [ + 45, + 445, + 301, + 588 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 45, + 445, + 301, + 588 + ], + "type": "inline_equation", + "content": "W_{V_i}" + }, + { + "bbox": [ + 45, + 445, + 301, + 588 + ], + "type": "text", + "content": " are trainable parameters for different heads, and " + }, + { + "bbox": [ + 45, + 445, + 301, + 588 + ], + "type": "inline_equation", + "content": "W_{O}" + }, + { + "bbox": [ + 45, + 445, + 301, + 588 + ], + "type": "text", + "content": " is the linear transformation matrix for the output. Concat(head1, ..., headh) concatenates the outputs of all attention heads along the feature dimension. Attention-based methods can effectively capture long-term dependencies, improve computational efficiency, and enhance the interpretability of AD through visualized attention weight values. When applied to AD, differences in the distribution of attention weights between normal and anomalous time series can serve as the basis for AD." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 45, + 593, + 301, + 750 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 593, + 301, + 750 + ], + "spans": [ + { + "bbox": [ + 45, + 593, + 301, + 750 + ], + "type": "text", + "content": "In the field of AD, particularly for time series data, there has been a growing number of studies proposing deep learning methods based on attention mechanisms. Autoencoders that combine convolution, LSTM, and self-attention mechanisms can better extract complex features from multivariate time series data and robustly detect anomalies in high noise conditions [132]. 
The Transformer, as a well-known attention-based model, has demonstrated superior performance in unsupervised prediction-based time series AD compared to LSTM, as it can learn the dynamic patterns of sequential data through self-attention mechanisms [133]. The Transformer-based AD utilizes attention-based sequence encoders for rapid inference, achieving an F1 score improvement of up to " + }, + { + "bbox": [ + 45, + 593, + 301, + 750 + ], + "type": "inline_equation", + "content": "17\\%" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 307, + 226, + 564, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 226, + 564, + 335 + ], + "spans": [ + { + "bbox": [ + 307, + 226, + 564, + 335 + ], + "type": "text", + "content": "on public datasets and reducing training time by as much as " + }, + { + "bbox": [ + 307, + 226, + 564, + 335 + ], + "type": "inline_equation", + "content": "99\\%" + }, + { + "bbox": [ + 307, + 226, + 564, + 335 + ], + "type": "text", + "content": " compared to the baseline [134]. Despite its outstanding capabilities, the Transformer still faces certain bottlenecks in AD. Attention-based methods are prone to overfitting when data is insufficient. The method in [92] seamlessly integrates contrastive learning and GAN into the Transformer, utilizing data augmentation techniques and geometric distribution masking to expand the training data, thereby enhancing data diversity and improving accuracy by " + }, + { + "bbox": [ + 307, + 226, + 564, + 335 + ], + "type": "inline_equation", + "content": "9.28\\%" + }, + { + "bbox": [ + 307, + 226, + 564, + 335 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 307, + 338, + 564, + 613 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 338, + 564, + 613 + ], + "spans": [ + { + "bbox": [ + 307, + 338, + 564, + 613 + ], + "type": "text", + "content": "Attention mechanisms are also frequently applied in graph neural networks to jointly detect anomalies in time series data. Reference [135] proposes a novel efficient Transformer model based on graph learning methods, employing two-stage adversarial training to train the AD model and utilizing prototypical networks to apply the model to anomaly classification. A contrastive time-frequency reconstruction network for unsupervised AD is used for AD and localization [136], where attention mechanisms and graph convolutional networks update the feature information of each time point, combining points with similar feature relationships to dilute the influence of anomalous points on normal points. Reference [137] models the correlations between temporal variables using graph convolutional networks, while also using an attention-based reconstruction model to output the importance of time series data within each time window, achieving an average AD F1 score exceeding 0.96. For multimodal data, a multimodal graph attention network (M-GAT) and temporal convolutional networks are used to capture spatial-temporal correlations in multimodal time series and correlations between modalities [138], ultimately outputting anomaly scores through reconstruction or prediction. More details about the application of GNNs in AD will be elaborated in the next subsection." 
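As a concrete reference for Eqs. (8) and (9), here is a minimal NumPy sketch of scaled dot-product attention and its multi-head extension; the per-head weight lists are an assumed layout, not a library API. The returned alpha matrix is the quantity that attention-weight-based detectors inspect.

```python
import numpy as np

def attention(Q, K, V):
    """Scaled dot-product attention, Eq. (8): alpha = softmax(Q K^T / sqrt(d_k))."""
    scores = Q @ K.T / np.sqrt(K.shape[-1])
    scores -= scores.max(axis=-1, keepdims=True)   # numerical stability
    alpha = np.exp(scores)
    alpha /= alpha.sum(axis=-1, keepdims=True)
    return alpha @ V, alpha                        # Attention(Q, K, V) = alpha V

def multi_head(X, Wq, Wk, Wv, Wo):
    """Multi-head attention, Eq. (9): project per head, concatenate, apply W_O.
    Wq, Wk, Wv are lists of per-head projection matrices (assumed layout)."""
    heads = [attention(X @ wq, X @ wk, X @ wv)[0]
             for wq, wk, wv in zip(Wq, Wk, Wv)]
    return np.concatenate(heads, axis=-1) @ Wo
```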
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 307, + 616, + 564, + 750 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 616, + 564, + 750 + ], + "spans": [ + { + "bbox": [ + 307, + 616, + 564, + 750 + ], + "type": "text", + "content": "In addition to GNNs, CNNs can also incorporate attention mechanisms to enhance various metrics of AD. Reference [139] effectively captures the local features of subsequences by leveraging the locality of CNNs and combining it with positional embeddings. At the same time, Zhu et al. [139] employ attention mechanisms to extract global features from the entire time series, thereby enhancing the effectiveness and potential of detection. Many works have also introduced LSTM to extract temporal correlations in time series data based on CNN models with attention mechanisms. For example, Sun et al. [140] employ a sequential approach where 1D convolution is" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 47, + 23, + 261, + 33 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 23, + 261, + 33 + ], + "spans": [ + { + "bbox": [ + 47, + 23, + 261, + 33 + ], + "type": "text", + "content": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. X, XXX 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 45, + 55, + 301, + 210 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 55, + 301, + 210 + ], + "spans": [ + { + "bbox": [ + 45, + 55, + 301, + 210 + ], + "type": "text", + "content": "first used to extract abstract features of the signal values at each time step, which are then input into a bidirectional long short-term memory network (Bi-LSTM), ultimately combining with attention mechanisms to make the model focus on locally important time steps. Meanwhile, Le et al. [141] integrate convolutional layers, LSTM layers, and self-attention layers into an autoencoder architecture to better extract complex features from multivariate time series. Similarly, Pei et al. [126] employ additional SVM to classify the attention weights based on a CNN-LSTM model with attention mechanisms to determine whether cyber-attacks have occurred in energy systems. The input data are the multimodal measurements from the deployed sensors." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 213, + 301, + 477 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 213, + 301, + 477 + ], + "spans": [ + { + "bbox": [ + 46, + 213, + 301, + 477 + ], + "type": "text", + "content": "3) GNN-based Anomaly Detection: Graph Neural Networks (GNNs) have gained increasing attention in AD tasks, as many types of data can be naturally represented as graph structures [142]. Wu et al. [143] have demonstrated the effectiveness of GNNs in identifying anomalies within complex graph-structured data environments. 
As neural network models specifically designed to handle graph-structured data, GNNs define nodes, edges, and graphs, where nodes represent individual elements in the dataset, such as data points in a sequence, sensor readings in multivariate data, or entities in relational datasets—denoted as the set " + }, + { + "bbox": [ + 46, + 213, + 301, + 477 + ], + "type": "inline_equation", + "content": "V" + }, + { + "bbox": [ + 46, + 213, + 301, + 477 + ], + "type": "text", + "content": ". Edges capture the relationships or dependencies between these elements, denoted as the set " + }, + { + "bbox": [ + 46, + 213, + 301, + 477 + ], + "type": "inline_equation", + "content": "E" + }, + { + "bbox": [ + 46, + 213, + 301, + 477 + ], + "type": "text", + "content": ", and can represent temporal correlations, spatial dependencies, or more abstract relational connections depending on the context. The graph, represented as " + }, + { + "bbox": [ + 46, + 213, + 301, + 477 + ], + "type": "inline_equation", + "content": "G = (V, E)" + }, + { + "bbox": [ + 46, + 213, + 301, + 477 + ], + "type": "text", + "content": ", captures the overall structure formed by nodes and edges. The primary operations in GNN training are message passing and aggregation, which are used to update and learn node features. Specifically, during message passing, each node receives information from its neighboring nodes and updates its own state. For a node " + }, + { + "bbox": [ + 46, + 213, + 301, + 477 + ], + "type": "inline_equation", + "content": "v" + }, + { + "bbox": [ + 46, + 213, + 301, + 477 + ], + "type": "text", + "content": ", the message passing formula is given as" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 79, + 480, + 300, + 510 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 480, + 300, + 510 + ], + "spans": [ + { + "bbox": [ + 79, + 480, + 300, + 510 + ], + "type": "interline_equation", + "content": "\\boldsymbol {m} _ {v} ^ {(k)} = \\sum_ {u \\in \\mathcal {N} (v)} M S G \\left(\\boldsymbol {h} _ {u} ^ {(k - 1)}, \\boldsymbol {h} _ {v} ^ {(k - 1)}, \\boldsymbol {e} _ {u v}\\right), \\tag {10}", + "image_path": "e1d772cb17cdcc7baea708f3a4c2d5a2999a5ea4551a1a3a2e14e9eef439af94.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 45, + 514, + 301, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 514, + 301, + 574 + ], + "spans": [ + { + "bbox": [ + 45, + 514, + 301, + 574 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 45, + 514, + 301, + 574 + ], + "type": "inline_equation", + "content": "\\mathcal{N}(v)" + }, + { + "bbox": [ + 45, + 514, + 301, + 574 + ], + "type": "text", + "content": " denotes the set of neighboring nodes of " + }, + { + "bbox": [ + 45, + 514, + 301, + 574 + ], + "type": "inline_equation", + "content": "v" + }, + { + "bbox": [ + 45, + 514, + 301, + 574 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 45, + 514, + 301, + 574 + ], + "type": "inline_equation", + "content": "h_u" + }, + { + "bbox": [ + 45, + 514, + 301, + 574 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 45, + 514, + 301, + 574 + ], + "type": "inline_equation", + "content": "h_v" + }, + { + "bbox": [ + 45, + 514, + 301, + 574 + ], + "type": "text", + "content": " are the features of nodes " + }, + { + "bbox": [ + 45, + 514, + 301, + 574 + ], + "type": "inline_equation", + "content": "u" + }, + { + "bbox": [ + 45, + 514, + 301, + 574 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ 
+ 45, + 514, + 301, + 574 + ], + "type": "inline_equation", + "content": "v" + }, + { + "bbox": [ + 45, + 514, + 301, + 574 + ], + "type": "text", + "content": " at layer " + }, + { + "bbox": [ + 45, + 514, + 301, + 574 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 45, + 514, + 301, + 574 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 45, + 514, + 301, + 574 + ], + "type": "inline_equation", + "content": "e_{uv}" + }, + { + "bbox": [ + 45, + 514, + 301, + 574 + ], + "type": "text", + "content": " represents the edge features. Subsequently, the received messages are aggregated with the current node state, and the node features are updated as" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 100, + 578, + 299, + 599 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 100, + 578, + 299, + 599 + ], + "spans": [ + { + "bbox": [ + 100, + 578, + 299, + 599 + ], + "type": "interline_equation", + "content": "\\boldsymbol {h} _ {v} ^ {(k)} = \\text {U P D A T E} \\left(\\boldsymbol {h} _ {v} ^ {(k - 1)}, \\boldsymbol {m} _ {v} ^ {(k)}\\right), \\tag {11}", + "image_path": "956f8a63368e4835961a7080cbc90098d7844aea31a82a4bd78c0123a0e5a997.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 45, + 602, + 228, + 615 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 602, + 228, + 615 + ], + "spans": [ + { + "bbox": [ + 45, + 602, + 228, + 615 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 45, + 602, + 228, + 615 + ], + "type": "inline_equation", + "content": "UPDATE(\\cdot, \\cdot)" + }, + { + "bbox": [ + 45, + 602, + 228, + 615 + ], + "type": "text", + "content": " is the update function." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 45, + 616, + 301, + 750 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 616, + 301, + 750 + ], + "spans": [ + { + "bbox": [ + 45, + 616, + 301, + 750 + ], + "type": "text", + "content": "As illustrated in Fig. 7, which uses time series data as an example, GNNs treat each variable in the multivariate time series as a node to capture complex relationships between different dimensions. While the primary focus here is on the predictive capabilities of GNNs, it is worth noting that they are also effective in reconstruction-based AD. The final decision on whether the input sequence is anomalous is primarily based on prediction errors or graph structure differences, with reconstruction errors serving as a supplementary indicator. GNN-based AD methods excel at modeling complex dependencies between time steps or sensors, offering flexibility to handle" + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 310, + 49, + 563, + 220 + ], + "blocks": [ + { + "bbox": [ + 310, + 49, + 563, + 220 + ], + "lines": [ + { + "bbox": [ + 310, + 49, + 563, + 220 + ], + "spans": [ + { + "bbox": [ + 310, + 49, + 563, + 220 + ], + "type": "image", + "image_path": "3330583da44f04aadda892ec09bf36e2ea653e3123da2117b4ea223ff767ce02.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 307, + 230, + 564, + 285 + ], + "lines": [ + { + "bbox": [ + 307, + 230, + 564, + 285 + ], + "spans": [ + { + "bbox": [ + 307, + 230, + 564, + 285 + ], + "type": "text", + "content": "Fig. 7. GNN-based method for anomaly detection with time series data. Time series data is embedded into a graph structure, where a spatial-temporal GNN extracts dependencies. 
The reconstruction module then estimates the original data. Anomalies are detected based on graph relational discrepancies (differences in predicted graph structure) and prediction discrepancies (differences between reconstructed and actual time series)." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "bbox": [ + 307, + 300, + 563, + 359 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 300, + 563, + 359 + ], + "spans": [ + { + "bbox": [ + 307, + 300, + 563, + 359 + ], + "type": "text", + "content": "both static and dynamic relationships across diverse time series structures. However, they still face challenges such as high computational complexity on large-scale graphs and difficulties in constructing optimal edge and graph configurations [144]." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 307, + 360, + 564, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 360, + 564, + 563 + ], + "spans": [ + { + "bbox": [ + 307, + 360, + 564, + 563 + ], + "type": "text", + "content": "In prediction-based GNN for AD, GDN [145] is a representative work that combines a structure learning approach with GNN, additionally using attention weights to predict time series values and detect anomalies based on the predictions. Similar methods include GTA [146] and CST-GL [147]. Furthermore, Liu et al. [148] propose a GNN-based contrastive learning model that generates prediction scores from high-dimensional attributes and local structures to detect anomalies, outperforming state-of-the-art methods on seven benchmark datasets. Beyond prediction-based methods, there are also reconstruction-based GNN approaches. For example, MTAD-GAT [149] employs a graph attention network as a spatiotemporal encoder to learn dependencies across variables and time, reconstructing the time series with a backbone reconstructor and identifying anomalies based on reconstruction errors. Similar techniques include VGCRN [150] and FuSAGNet [151]." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 308, + 578, + 563, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 578, + 563, + 601 + ], + "spans": [ + { + "bbox": [ + 308, + 578, + 563, + 601 + ], + "type": "text", + "content": "C. Deep learning methods for Anomaly Detection based on Hybrid Method" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 307, + 605, + 564, + 750 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 605, + 564, + 750 + ], + "spans": [ + { + "bbox": [ + 307, + 605, + 564, + 750 + ], + "type": "text", + "content": "In AD, reconstruction-based and prediction-based methods offer distinct but complementary approaches to identifying anomalies. Both methods rely on the discrepancy between the model's output and the actual input data as an indicator of abnormality. However, they diverge in how they handle data and their areas of application. Reconstruction-based methods focus on learning the underlying distribution of normal data. Once trained, the model attempts to recreate the input data. The reconstruction error, measured as the difference between the original data and its reconstruction, serves as a key indicator of anomalies. 
A high reconstruction error suggests that the data deviates from the normal patterns learned by the" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 47, + 23, + 261, + 33 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 23, + 261, + 33 + ], + "spans": [ + { + "bbox": [ + 47, + 23, + 261, + 33 + ], + "type": "text", + "content": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. X, XXX 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 45, + 54, + 301, + 606 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 54, + 301, + 606 + ], + "spans": [ + { + "bbox": [ + 45, + 54, + 301, + 606 + ], + "type": "text", + "content": "model. This approach is particularly effective in cases where understanding the full structure or distribution of the data is crucial, such as in image-based AD or other high-dimensional datasets. In contrast, prediction-based methods focus on forecasting specific attributes or missing values from the data, rather than reconstructing the entire input. These methods typically predict future values or infer missing data points by leveraging known features. If the predicted values significantly deviate from the actual values, this signals a potential anomaly. Prediction-based methods are often more suited to feature-rich datasets, where predicting specific variables can help identify irregular patterns. For instance, in applications like fraud detection, predicting expected behaviors or transactions can reveal anomalies when the predicted outcomes differ from the observed ones. While both methods differ in their data processing approaches, they can be highly complementary. In many cases, combining reconstruction-based and prediction-based techniques within a hybrid framework allows for more robust AD. Reconstruction models capture the overall structure and patterns in the data, while prediction models focus on detecting deviations in specific variables or features. This combination can provide a more comprehensive solution for identifying anomalies in complex datasets across various domains. Tang et al. [152] utilize a U-Net module as the prediction module to perform future frame prediction, amplifying reconstruction errors for abnormal events, while another U-Net module is used as the reconstruction module to enhance predicted frames for normal events, thus improving the effectiveness of AD. Lv et al. [31] adopt a dilated convolution-based autoencoder to integrate prediction errors and reconstruction errors into the output anomaly scores, effectively improving the generalization capability of the detection model. Liu et al. [153] leverage a reconstruction model and a prediction model within an end-to-end semi-supervised AD framework to effectively capture inter-variable correlations and temporal dependencies in multivariate time series data from wind turbines. Additionally, by incorporating an auxiliary discriminator with adversarial training, the model can progressively improve performance using limited labeled data, enhancing the transition from unsupervised to supervised AD. Wei et al. 
[154] propose a hybrid deep-learning model combining LSTM and autoencoder for AD in indoor air quality data, where the LSTM captures long-term dependencies in time-series data and the autoencoder uses reconstruction loss to detect anomalies, effectively addressing both temporal correlations and reconstruction errors for improved detection accuracy." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 624, + 155, + 636 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 624, + 155, + 636 + ], + "spans": [ + { + "bbox": [ + 46, + 624, + 155, + 636 + ], + "type": "text", + "content": "D. Summary and Insights" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 640, + 301, + 750 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 640, + 301, + 750 + ], + "spans": [ + { + "bbox": [ + 45, + 640, + 301, + 750 + ], + "type": "text", + "content": "This section introduces three types of deep learning-based AD methods: reconstruction-based, prediction-based, and hybrid approaches. Reconstruction-based methods are particularly effective in handling high-dimensional and unsupervised data by learning intrinsic patterns and identifying deviations through reconstruction errors. Prediction-based methods excel at modeling temporal dependencies in time-series data, enabling the detection of unexpected patterns in dynamic environments. Hybrid approaches combine these strengths" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 307, + 54, + 564, + 116 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 54, + 564, + 116 + ], + "spans": [ + { + "bbox": [ + 307, + 54, + 564, + 116 + ], + "type": "text", + "content": "to address complex scenarios where multiple anomaly types coexist. Notably, these methods demonstrate the power of deep learning in capturing intricate patterns and dependencies that traditional methods often miss, making them indispensable for tackling diverse and challenging AD tasks." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 328, + 128, + 545, + 152 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 328, + 128, + 545, + 152 + ], + "spans": [ + { + "bbox": [ + 328, + 128, + 545, + 152 + ], + "type": "text", + "content": "V. INTEGRATE TRADITIONAL METHOD AND DEEP LEARNING METHOD" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 307, + 155, + 564, + 286 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 155, + 564, + 286 + ], + "spans": [ + { + "bbox": [ + 307, + 155, + 564, + 286 + ], + "type": "text", + "content": "In the field of AD, traditional methods and deep learning approaches each offer unique advantages. Traditional methods, such as clustering [155] and Support Vector Data Description [156], are often simpler, more interpretable, and computationally efficient. These methods excel in providing transparent decision-making processes, making them suitable for applications where model interpretability is crucial. On the other hand, deep learning methods, with their ability to model complex, high-dimensional data distributions, offer enhanced detection accuracy and adaptability, especially for large datasets and unstructured data like images and sequences." 
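+ }, + { + "bbox": [ + 307, + 155, + 564, + 286 + ], + "type": "text", + "content": "To make this integration concrete, the following minimal sketch (an illustration only, not the method of any cited work) scores deep features with a traditional k-means detector: the distance of a sample's features to the nearest centroid fitted on normal data serves as an interpretable anomaly score. The feature extractor encode() is a hypothetical placeholder." + }, + { + "bbox": [ + 307, + 155, + 564, + 286 + ], + "type": "text", + "content": "import numpy as np\nfrom sklearn.cluster import KMeans\n\ndef fit_detector(normal_features, n_clusters=8):\n    # Cluster deep features of normal data only.\n    km = KMeans(n_clusters=n_clusters, n_init=10).fit(normal_features)\n    # Calibrate a threshold from normal nearest-centroid distances.\n    d = km.transform(normal_features).min(axis=1)\n    return km, np.quantile(d, 0.99)\n\ndef anomaly_scores(km, features):\n    # Distance to the nearest centroid is the anomaly score.\n    return km.transform(features).min(axis=1)\n\n# Usage, assuming encode() maps raw inputs to feature vectors:\n# km, thr = fit_detector(encode(x_train_normal))\n# flags = anomaly_scores(km, encode(x_test)) > thr"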
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 307, + 287, + 565, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 287, + 565, + 384 + ], + "spans": [ + { + "bbox": [ + 307, + 287, + 565, + 384 + ], + "type": "text", + "content": "The integration of traditional and deep learning methods aims to leverage the interpretability and simplicity of traditional methods with the robustness and flexibility of deep learning techniques. By combining these approaches, researchers seek to create hybrid models that maintain accuracy while offering insights into the underlying decision-making process, improving both detection power and model transparency." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 309, + 399, + 402, + 410 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 399, + 402, + 410 + ], + "spans": [ + { + "bbox": [ + 309, + 399, + 402, + 410 + ], + "type": "text", + "content": "A. Clustering method" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 307, + 413, + 565, + 750 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 413, + 565, + 750 + ], + "spans": [ + { + "bbox": [ + 307, + 413, + 565, + 750 + ], + "type": "text", + "content": "Clustering models play a crucial role in unsupervised AD, particularly for textual data. These models group similar data points based on their proximity in feature space and identify anomalies as points that deviate from established clusters [157]. Common clustering techniques, such as k-means [158], Density-Based Spatial Clustering of Applications with Noise (DBSCAN) [159], and hierarchical clustering [160], work effectively for simpler datasets and offer the advantage of interpretability. By integrating clustering methods with deep learning, such as applying clustering post feature extraction by a neural network, it is possible to improve detection accuracy while maintaining an interpretable clustering structure. This hybrid approach is particularly useful in cases where data distribution varies, and flexible, context-aware AD is required. For instance, Li et al. [161] propose a method that extends fuzzy clustering with a reconstruction criterion and Particle Swarm Optimization (PSO) to detect anomalies in both amplitude and shape. This highlights how traditional clustering methods can benefit from optimization techniques to handle diverse anomaly types. Similarly, Markovitz et al. [162] introduce an innovative approach for AD in human actions by working directly on human pose graphs extracted from video sequences. By mapping these graphs to a latent space, clustering them, and applying a Dirichlet process-based mixture model, the method effectively leverages probabilistic modeling to enhance the robustness and flexibility of clustering for action recognition. In video AD, Qiu et al. [163] propose a convolution-enhanced self-attentive video auto-encoder" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 47, + 23, + 261, + 32 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 23, + 261, + 32 + ], + "spans": [ + { + "bbox": [ + 47, + 23, + 261, + 32 + ], + "type": "text", + "content": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. 
X, XXX 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 45, + 55, + 301, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 55, + 301, + 258 + ], + "spans": [ + { + "bbox": [ + 45, + 55, + 301, + 258 + ], + "type": "text", + "content": "integrated with a dual-scale clustering module based on the K-means algorithm. This approach effectively distinguishes normal and abnormal video data by enhancing feature representations and addressing the fuzzy boundaries between them. Additionally, Peng et al. [33] introduce a multivariate ELM-MI framework combined with a dynamic kernel selection method. By employing hierarchical clustering on unlabeled data to determine kernels, this method enables unsupervised online detection of various anomaly types, including point and group anomalies, while reducing computational costs and improving robustness. These studies collectively highlight the potential of hybrid approaches that integrate clustering with advanced techniques like deep learning, probabilistic modeling, or optimization frameworks. Such methods leverage the interpretability and simplicity of traditional clustering while addressing its limitations in handling complex data, offering a promising pathway for accurate and flexible AD." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 268, + 141, + 280 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 268, + 141, + 280 + ], + "spans": [ + { + "bbox": [ + 46, + 268, + 141, + 280 + ], + "type": "text", + "content": "B. Normalizing Flows" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 282, + 301, + 400 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 282, + 301, + 400 + ], + "spans": [ + { + "bbox": [ + 45, + 282, + 301, + 400 + ], + "type": "text", + "content": "Normalizing Flows (NF) [164] offer a probabilistic framework for AD by estimating the probability distribution of data. Using a sequence of invertible transformations, NFs can model complex distributions, making them particularly effective for identifying anomalies as low-probability events. When integrated with deep learning models, such as CNNs or RNNs, NFs act as precise probabilistic estimators, complementing the feature extraction capabilities of deep networks. This hybrid framework enhances AD, particularly in high-dimensional or unstructured datasets." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 45, + 402, + 301, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 402, + 301, + 723 + ], + "spans": [ + { + "bbox": [ + 45, + 402, + 301, + 723 + ], + "type": "text", + "content": "For instance, Yu et al. [165] propose FastFlow, a 2D normalizing flow module integrated with deep feature extractors like ResNet and Vision Transformers. 
By effectively modeling feature distributions and capturing both local and global relationships, FastFlow achieves state-of-the-art performance, with a " + }, + { + "bbox": [ + 45, + 402, + 301, + 723 + ], + "type": "inline_equation", + "content": "99.4\\%" + }, + { + "bbox": [ + 45, + 402, + 301, + 723 + ], + "type": "text", + "content": " AUC on the MVTec AD dataset, while maintaining high inference efficiency. Similarly, Cho et al. [166] introduce Implicit Two-path Autoencoder (ITAE), which reconstructs normal video patterns by implicitly modeling appearance and motion features through two encoders and a shared decoder. NF enhances ITAE by estimating the density of normal embeddings, enabling robust detection of out-of-distribution anomalies, with strong results across six surveillance benchmarks. For multivariate time series data, Zhou et al. [167] combine a graph structure learning model with entity-aware normalizing flows to capture interdependencies and evolving relations among entities. By estimating entity-specific densities and employing a clustering strategy for similar entities, the extended MTGFlow_cluster improves density estimation accuracy, demonstrating superior performance on six benchmark datasets. Further expanding on the use of graphs, Dai et al. [168] propose Graph-Augmented Normalizing Flow (GANF), which incorporates a Bayesian network to model causal relationships among time series. This approach factorizes joint probabilities into conditional probabilities, improving density estimation and enabling effective detection of anomalies in low-density regions, as well as identifying distribution drifts." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 45, + 724, + 301, + 750 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 724, + 301, + 750 + ], + "spans": [ + { + "bbox": [ + 45, + 724, + 301, + 750 + ], + "type": "text", + "content": "These studies collectively highlight the strengths of integrating Normalizing Flows with traditional and deep learning-" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 307, + 54, + 564, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 54, + 564, + 140 + ], + "spans": [ + { + "bbox": [ + 307, + 54, + 564, + 140 + ], + "type": "text", + "content": "based methods. By combining the interpretability and precision of probabilistic models with the expressive power of deep networks or graph structures, these hybrid approaches address the challenges of complex data distributions, offering scalable and robust solutions for diverse AD tasks. This synergy underscores the potential of such methods to push the boundaries of accuracy and adaptability in real-world applications." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 309, + 154, + 462, + 166 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 154, + 462, + 166 + ], + "spans": [ + { + "bbox": [ + 309, + 154, + 462, + 166 + ], + "type": "text", + "content": "C. Support Vector Data Description" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 307, + 168, + 564, + 347 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 168, + 564, + 347 + ], + "spans": [ + { + "bbox": [ + 307, + 168, + 564, + 347 + ], + "type": "text", + "content": "Support Vector Data Description (SVDD) [156] is a traditional machine learning method used to define a boundary around normal data points, effectively distinguishing them from anomalies. 
Unlike binary classification, SVDD is particularly effective for one-class classification tasks, where only normal data is available. This approach is computationally efficient and interpretable, as it provides a clear boundary between normal and abnormal points. By integrating SVDD with deep learning, researchers can enhance the boundary definition based on high-dimensional features extracted by a neural network, resulting in a model that combines the boundary precision of SVDD with the feature richness of deep learning. This hybrid model is highly effective in scenarios where boundary clarity and interpretability are paramount, such as in industrial monitoring or fraud detection." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 307, + 348, + 564, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 348, + 564, + 574 + ], + "spans": [ + { + "bbox": [ + 307, + 348, + 564, + 574 + ], + "type": "text", + "content": "To improve latent representations, Zhou et al. [169] propose Deep SVDD-VAE, which jointly optimizes VAE and SVDD. The VAE reconstructs input data, and SVDD simultaneously defines a spherical boundary in the latent space, ensuring separability of normal and anomalous instances. This joint optimization significantly outperforms traditional AE-based methods, as shown on MNIST, CIFAR-10, and GTSRB datasets. For variable-length time series data, Ergen et al. [124] introduce an LSTM-based AD framework, where LSTM and SVDD are jointly optimized using modified objectives. This method extends seamlessly to GRU architectures, demonstrating strong performance across unsupervised, semi-supervised, and supervised settings. In addition, Zhang et al. [170] propose Deep Structure Preservation SVDD (DSPSVDD), which simultaneously minimizes hypersphere volume and network reconstruction error. This dual objective ensures deep feature preservation and enhances AD performance, outperforming traditional SVDD models on datasets like MNIST and MVTec AD." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 308, + 574, + 564, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 574, + 564, + 647 + ], + "spans": [ + { + "bbox": [ + 308, + 574, + 564, + 647 + ], + "type": "text", + "content": "These studies highlight the strengths of combining SVDD with deep learning, where deep models enhance feature representation while SVDD ensures boundary precision. This hybrid framework effectively addresses limitations in both methods, offering a scalable and interpretable solution for complex AD tasks across diverse domains." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 309, + 662, + 419, + 673 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 662, + 419, + 673 + ], + "spans": [ + { + "bbox": [ + 309, + 662, + 419, + 673 + ], + "type": "text", + "content": "D. Summary and Insights" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 307, + 677, + 564, + 750 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 677, + 564, + 750 + ], + "spans": [ + { + "bbox": [ + 307, + 677, + 564, + 750 + ], + "type": "text", + "content": "This section explores the integration of traditional and deep learning methods for AD, highlighting how their complementary strengths can be combined. Traditional methods, known for their simplicity, interpretability, and computational efficiency, excel in scenarios where transparency is critical. 
In contrast, deep learning methods offer superior adaptability" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 47, + 23, + 261, + 33 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 23, + 261, + 33 + ], + "spans": [ + { + "bbox": [ + 47, + 23, + 261, + 33 + ], + "type": "text", + "content": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. X, XXX 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "bbox": [ + 45, + 55, + 301, + 139 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 55, + 301, + 139 + ], + "spans": [ + { + "bbox": [ + 45, + 55, + 301, + 139 + ], + "type": "text", + "content": "and accuracy, particularly for high-dimensional and unstructured data. By integrating these approaches, hybrid models can leverage the interpretability of traditional methods while retaining the robustness and flexibility of deep learning. This fusion not only enhances AD performance but also bridges the gap between accuracy and model transparency, making it a promising direction for future research." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 88, + 155, + 258, + 166 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 155, + 258, + 166 + ], + "spans": [ + { + "bbox": [ + 88, + 155, + 258, + 166 + ], + "type": "text", + "content": "VI. OPEN ISSUES AND FUTURE WORKS" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 171, + 128, + 182 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 171, + 128, + 182 + ], + "spans": [ + { + "bbox": [ + 45, + 171, + 128, + 182 + ], + "type": "text", + "content": "A. Data Collection" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 45, + 187, + 301, + 295 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 187, + 301, + 295 + ], + "spans": [ + { + "bbox": [ + 45, + 187, + 301, + 295 + ], + "type": "text", + "content": "Data scarcity and class imbalance remain major challenges in AD. Since anomalies are rare, obtaining large labeled datasets is costly and time-consuming, especially when expert annotation is required. Supervised learning struggles due to the lack of abnormal samples, while the overwhelming presence of normal data biases models toward common patterns. This problem is particularly critical in cybersecurity, healthcare, and industrial monitoring, where undetected anomalies can have serious consequences." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 45, + 296, + 302, + 547 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 296, + 302, + 547 + ], + "spans": [ + { + "bbox": [ + 45, + 296, + 302, + 547 + ], + "type": "text", + "content": "Several approaches mitigate these issues. Semi-supervised and unsupervised learning exploit normal data distributions to detect deviations without requiring labeled anomalies [171] [172]. Data augmentation, synthetic data generation, and oversampling improve data balance by increasing the number of anomalous examples, helping models generalize better [173] [174]. Despite these advancements, challenges remain. 
Semi-supervised methods struggle with subtle anomalies that closely resemble normal data. Augmentation techniques, often based on simple transformations, may fail to capture complex domain-specific variations. Similarly, synthetic data generation may not fully reflect real-world anomaly diversity, leading to models biased toward normal samples. Moreover, even with augmentation, models risk overfitting to the majority class, compromising anomaly detection performance. Ensuring that models remain sensitive to rare anomalies while maintaining accuracy on normal data remains an ongoing challenge. Future research may focus on refining self-supervised learning [175], improving the diversity of synthetic samples [176], and developing more adaptive anomaly detection frameworks to enhance robustness in real-world applications." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 45, + 565, + 173, + 577 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 565, + 173, + 577 + ], + "spans": [ + { + "bbox": [ + 45, + 565, + 173, + 577 + ], + "type": "text", + "content": "B. Computational Complexity" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 45, + 580, + 301, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 580, + 301, + 723 + ], + "spans": [ + { + "bbox": [ + 45, + 580, + 301, + 723 + ], + "type": "text", + "content": "In AD, computational complexity is a crucial factor, especially for systems operating in real-time environments or handling large-scale datasets. The efficiency of an algorithm directly impacts its feasibility in fields like industrial monitoring, cybersecurity, and autonomous systems, where swift detection is essential. Many advanced models, particularly deep learning approaches like autoencoders, GANs, and LSTMs, are computationally intensive due to their complex architectures and iterative learning processes. This often leads to trade-offs between detection accuracy and computational efficiency, with continuous efforts aimed at optimizing models to reduce computational demands without sacrificing performance." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 45, + 724, + 301, + 750 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 724, + 301, + 750 + ], + "spans": [ + { + "bbox": [ + 45, + 724, + 301, + 750 + ], + "type": "text", + "content": "Moreover, AD models frequently require substantial memory resources, especially when dealing with high-dimensional" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 307, + 55, + 564, + 210 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 55, + 564, + 210 + ], + "spans": [ + { + "bbox": [ + 307, + 55, + 564, + 210 + ], + "type": "text", + "content": "or streaming data, making memory usage a crucial consideration. Techniques like memory-efficient architectures, data compression, and sparse modeling are commonly used to address this issue. Real-time AD adds further complexity, as algorithms must process incoming data and make rapid decisions in applications like autonomous driving and fraud detection [177], where even minimal delays can have severe consequences. Achieving real-time performance typically involves optimizing data processing speeds and decision-making through lightweight models [178] [179] and parallel processing techniques, such as GPU acceleration [180]. However, balancing real-time detection capabilities with high accuracy remains challenging." 
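+ }, + { + "bbox": [ + 307, + 55, + 564, + 210 + ], + "type": "text", + "content": "One way to ease this trade-off, sketched below purely as an illustration (expensive_score is a hypothetical stand-in for a costly detector such as an autoencoder's reconstruction error), is a two-stage cascade: a lightweight statistical filter screens every sample in real time, and the expensive model is evaluated only on the small fraction it flags." + }, + { + "bbox": [ + 307, + 55, + 564, + 210 + ], + "type": "text", + "content": "import numpy as np\n\ndef cheap_filter(x, z_thresh=3.0):\n    # Stage 1: an O(n) global z-score flags candidate anomalies cheaply.\n    z = np.abs((x - x.mean()) / (x.std() + 1e-8))\n    return np.flatnonzero(z > z_thresh)\n\ndef cascade_scores(x, expensive_score, z_thresh=3.0):\n    # Stage 2: the costly detector runs only on flagged samples,\n    # keeping the average per-sample cost near the cheap filter's.\n    scores = np.zeros(len(x))\n    idx = cheap_filter(x, z_thresh)\n    if idx.size > 0:\n        scores[idx] = expensive_score(x[idx])\n    return scores"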
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 308, + 210, + 564, + 330 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 210, + 564, + 330 + ], + "spans": [ + { + "bbox": [ + 308, + 210, + 564, + 330 + ], + "type": "text", + "content": "The tension between computational complexity and detection accuracy persists, as complex models often excel in detection but lack practical applicability for real-time or large-scale scenarios. Simpler models, though computationally efficient, may fail to detect nuanced anomalies. Hybrid models or multi-stage frameworks that deploy complex methods only as needed provide a potential solution. Additionally, future research may benefit from exploring distributed computing solutions, like cloud [181] or edge computing, to enhance real-time AD performance in resource-limited environments." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 309, + 340, + 466, + 352 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 340, + 466, + 352 + ], + "spans": [ + { + "bbox": [ + 309, + 340, + 466, + 352 + ], + "type": "text", + "content": "C. Explainability and Interpretability" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 307, + 354, + 564, + 486 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 354, + 564, + 486 + ], + "spans": [ + { + "bbox": [ + 307, + 354, + 564, + 486 + ], + "type": "text", + "content": "Deep learning methods have greatly advanced AD by capturing complex patterns in high-dimensional data. However, they are often criticized as \"black-box\" models due to their lack of transparency, making it challenging to understand why certain data points are flagged as anomalies. For fields like healthcare, finance, or industrial monitoring, accurate detection alone is insufficient; stakeholders also need clear explanations to understand why a particular anomaly was detected. This lack of interpretability limits the practical deployment of deep learning models, as the inability to justify decisions reduces trust and hinders adoption in critical applications." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 308, + 486, + 564, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 486, + 564, + 640 + ], + "spans": [ + { + "bbox": [ + 308, + 486, + 564, + 640 + ], + "type": "text", + "content": "In fields like healthcare, where anomalies may be linked to medical diagnoses, or in finance, where fraud detection can carry legal implications, interpretability is essential. Transparent model decisions enable experts to validate results and make informed decisions. In safety-critical applications, such as autonomous driving or industrial equipment monitoring, understanding the rationale behind AD is vital for ensuring safety. One major challenge is balancing the trade-off between model interpretability and performance. Simpler models, like decision trees or linear regression, offer greater transparency but often lack the complexity needed to detect subtle anomalies in high-dimensional data. In contrast, deep learning models provide high accuracy but are harder to interpret." 
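+ }, + { + "bbox": [ + 308, + 486, + 564, + 640 + ], + "type": "text", + "content": "As a simple complement to the post-hoc tools discussed next, per-feature error attribution offers a lightweight, model-agnostic explanation. The sketch below (illustrative only) ranks the variables of one flagged sample by their contribution to a reconstruction-based anomaly score." + }, + { + "bbox": [ + 308, + 486, + 564, + 640 + ], + "type": "text", + "content": "import numpy as np\n\ndef explain_anomaly(x, x_hat, feature_names):\n    # Per-feature squared reconstruction error: which variables\n    # contributed most to this sample's anomaly score.\n    contrib = (np.asarray(x) - np.asarray(x_hat)) ** 2\n    order = np.argsort(contrib)[::-1]\n    return [(feature_names[i], float(contrib[i])) for i in order]\n\n# Usage: x is one input vector and x_hat its reconstruction from any\n# reconstruction-based model; top-ranked features localize the anomaly."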
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 308, + 641, + 564, + 750 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 641, + 564, + 750 + ], + "spans": [ + { + "bbox": [ + 308, + 641, + 564, + 750 + ], + "type": "text", + "content": "Ongoing research is exploring hybrid approaches, where interpretable models are combined with more complex ones, allowing for accurate AD with the added benefit of interpretability. For example, attention mechanisms [182] in neural networks can help highlight specific data regions influencing decisions, providing insights into the model's internal workings. Alternatively, tools like Local Interpretable Model-agnostic Explanations (LIME) and SHapley Additive exPlanations (SHAP) [2] can offer post-hoc explanations, improving" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 47, + 23, + 261, + 32 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 23, + 261, + 32 + ], + "spans": [ + { + "bbox": [ + 47, + 23, + 261, + 32 + ], + "type": "text", + "content": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. X, XXX 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "bbox": [ + 45, + 55, + 301, + 103 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 55, + 301, + 103 + ], + "spans": [ + { + "bbox": [ + 45, + 55, + 301, + 103 + ], + "type": "text", + "content": "transparency without altering model structure. Future research could also focus on real-time explainability in time-sensitive applications, and incorporating domain knowledge or user feedback to enhance model interpretability." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 45, + 117, + 220, + 129 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 117, + 220, + 129 + ], + "spans": [ + { + "bbox": [ + 45, + 117, + 220, + 129 + ], + "type": "text", + "content": "D. Handling Diverse Types of Anomalies" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 132, + 301, + 275 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 132, + 301, + 275 + ], + "spans": [ + { + "bbox": [ + 45, + 132, + 301, + 275 + ], + "type": "text", + "content": "In real-world AD, multiple types of anomalies often coexist, adding complexity to the detection process. Beyond point anomalies, which are the simplest, other types like contextual and collective anomalies are common, especially in dynamic environments. For instance, in intelligent transportation systems, anomalies may include both isolated incidents (e.g., a single vehicle's sudden deceleration) and collective patterns (e.g., multiple vehicles simultaneously slowing down), each requiring different detection methods. Effectively capturing these varied anomaly types requires flexible models capable of adapting to different anomaly patterns without focusing on only one type." 
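+ }, + { + "bbox": [ + 45, + 132, + 301, + 275 + ], + "type": "text", + "content": "As a minimal univariate illustration of why these types call for different detectors (a sketch under simplifying independence assumptions, not a production method), point anomalies can be caught with a per-sample z-score, while collective anomalies need a window-level statistic:" + }, + { + "bbox": [ + 45, + 132, + 301, + 275 + ], + "type": "text", + "content": "import numpy as np\n\ndef point_anomalies(x, z_thresh=3.0):\n    # Point anomalies: single samples far from the global distribution.\n    z = np.abs((x - x.mean()) / (x.std() + 1e-8))\n    return np.flatnonzero(z > z_thresh)\n\ndef collective_anomalies(x, win=20, z_thresh=3.0):\n    # Collective anomalies: windows whose mean drifts from the global\n    # mean even though no single point is extreme on its own.\n    mu, sd = x.mean(), x.std() + 1e-8\n    win_means = np.convolve(x, np.ones(win) / win, mode='valid')\n    z = np.abs(win_means - mu) / (sd / np.sqrt(win))\n    return np.flatnonzero(z > z_thresh)  # indices of window starts"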
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 45, + 275, + 302, + 431 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 275, + 302, + 431 + ], + "spans": [ + { + "bbox": [ + 45, + 275, + 302, + 431 + ], + "type": "text", + "content": "Continuous research is needed to develop models that can generalize across anomaly types, enhancing adaptability and balancing detection accuracy with model flexibility. Hybrid approaches, for instance, can integrate different methods to capture diverse anomalies more effectively. The challenge remains in achieving this versatility without sacrificing accuracy, as models must maintain strong performance across different contexts. Future work may also explore multi-modal models [183] that combine different types of data, further improving detection capabilities by drawing from diverse data sources. These directions aim to create AD systems that are both robust and adaptable, capable of handling the complex and mixed nature of real-world anomaly scenarios." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 131, + 442, + 216, + 453 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 442, + 216, + 453 + ], + "spans": [ + { + "bbox": [ + 131, + 442, + 216, + 453 + ], + "type": "text", + "content": "VII. CONCLUSION" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 45, + 457, + 302, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 457, + 302, + 685 + ], + "spans": [ + { + "bbox": [ + 45, + 457, + 302, + 685 + ], + "type": "text", + "content": "In this survey, we have provided a comprehensive overview of the recent advancements in AD with a primary focus on deep learning techniques from 2019 to 2024. By analyzing over 180 research papers from leading journals and conferences, we have explored how AD methods have evolved to address diverse challenges across various types of data. This survey categorizes deep learning methods into reconstruction-based, prediction-based, and hybrid approaches, and examines their strengths, limitations, and applications. Recognizing the simplicity, interpretability, and computational efficiency of traditional AD methods, we reviewed their integration with deep learning techniques. These hybrid approaches aim to leverage the strengths of both paradigms, enhancing robustness and efficiency in AD systems. This survey not only sheds light on the state-of-the-art techniques but also identifies gaps and opportunities for future research. By focusing on the latest trends and innovations, this work aims to inspire further exploration and advancements in the rapidly evolving field of AD." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 143, + 696, + 203, + 706 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 696, + 203, + 706 + ], + "spans": [ + { + "bbox": [ + 143, + 696, + 203, + 706 + ], + "type": "text", + "content": "REFERENCES" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 711, + 301, + 749 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 711, + 301, + 749 + ], + "spans": [ + { + "bbox": [ + 53, + 711, + 301, + 749 + ], + "type": "text", + "content": "[1] L. Ruff, J. R. Kauffmann, R. A. Vandermeulen, G. Montavon, W. Samek, M. Kloft, T. G. Dietterich, and K.-R. Müller, “A unifying review of deep and shallow anomaly detection,” Proc. IEEE, vol. 109, no. 5, pp. 756–795, 2021." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 314, + 56, + 564, + 748 + ], + "type": "list", + "angle": 0, + "index": 33, + "blocks": [ + { + "bbox": [ + 318, + 56, + 564, + 92 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 56, + 564, + 92 + ], + "spans": [ + { + "bbox": [ + 318, + 56, + 564, + 92 + ], + "type": "text", + "content": "[2] V. Vimbi, N. Shaffi, and M. Mahmud, \"Interpreting artificial intelligence models: a systematic review on the application of LIME and SHAP in Alzheimer's disease detection,\" Brain Informatics, vol. 11, no. 1, p. 10, 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 318, + 93, + 564, + 120 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 93, + 564, + 120 + ], + "spans": [ + { + "bbox": [ + 318, + 93, + 564, + 120 + ], + "type": "text", + "content": "[3] F. Al-Turjman, H. Zahmatkesh, and R. Shahroze, “An overview of security and privacy in smart cities’ IoT communications,” Trans. Emerg. Telecommun. Technol., vol. 33, no. 3, p. e3677, 2022." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 318, + 121, + 563, + 156 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 121, + 563, + 156 + ], + "spans": [ + { + "bbox": [ + 318, + 121, + 563, + 156 + ], + "type": "text", + "content": "[4] Y. A. Qadri, A. Nauman, Y. B. Zikria, A. V. Vasilakos, and S. W. Kim, \"The future of healthcare internet of things: a survey of emerging technologies,\" IEEE Commun. Surv. Tutor., vol. 22, no. 2, pp. 1121-1167, 2020." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 318, + 156, + 563, + 184 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 156, + 563, + 184 + ], + "spans": [ + { + "bbox": [ + 318, + 156, + 563, + 184 + ], + "type": "text", + "content": "[5] M. Humayun, N. Jhanjhi, B. Hamid, and G. Ahmed, “Emerging smart logistics and transportation using IoT and blockchain,” IEEE Internet Things Mag., vol. 3, no. 2, pp. 58–62, 2020." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 318, + 185, + 563, + 211 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 185, + 563, + 211 + ], + "spans": [ + { + "bbox": [ + 318, + 185, + 563, + 211 + ], + "type": "text", + "content": "[6] S. H. Haji and S. Y. Ameen, \"Attack and anomaly detection in IoT networks using machine learning techniques: A review,\" Asian J. Res. Comput. Sci., vol. 9, no. 2, pp. 30-46, 2021." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 318, + 212, + 563, + 247 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 212, + 563, + 247 + ], + "spans": [ + { + "bbox": [ + 318, + 212, + 563, + 247 + ], + "type": "text", + "content": "[7] V. Mothukuri, P. Khare, R. M. Parizi, S. Pouriyeh, A. Dehghantanha, and G. Srivastava, \"Federated-learning-based anomaly detection for IoT security attacks,\" IEEE Internet Things J., vol. 9, no. 4, pp. 2545-2554, 2021." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 318, + 247, + 563, + 282 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 247, + 563, + 282 + ], + "spans": [ + { + "bbox": [ + 318, + 247, + 563, + 282 + ], + "type": "text", + "content": "[8] S. A. Al Mamun and J. Valimaki, “Anomaly detection and classification in cellular networks using automatic labeling technique for applying supervised learning,” Procedia Comput. Sci., vol. 140, pp. 186-195, 2018." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 318, + 284, + 563, + 328 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 284, + 563, + 328 + ], + "spans": [ + { + "bbox": [ + 318, + 284, + 563, + 328 + ], + "type": "text", + "content": "[9] M. E. Villa-Pérez, M. A. Alvarez-Carmona, O. Loyola-Gonzalez, M. A. Medina-Pérez, J. C. Velazco-Rossell, and K.-K. R. Choo, \"Semi-supervised anomaly detection algorithms: A comparative summary and future research directions,\" Knowledge-Based Systems, vol. 218, p. 106878, 2021." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 314, + 329, + 563, + 357 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 329, + 563, + 357 + ], + "spans": [ + { + "bbox": [ + 314, + 329, + 563, + 357 + ], + "type": "text", + "content": "[10] G. Michau and O. Fink, \"Unsupervised transfer learning for anomaly detection: Application to complementary operating condition transfer,\" Knowledge-Based Systems, vol. 216, p. 106816, 2021." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 314, + 357, + 563, + 384 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 357, + 563, + 384 + ], + "spans": [ + { + "bbox": [ + 314, + 357, + 563, + 384 + ], + "type": "text", + "content": "[11] Y. Liang, J. Zhang, S. Zhao, R. Wu, Y. Liu, and S. Pan, \"Omni-frequency channel-selection representations for unsupervised anomaly detection,\" IEEE Trans. Image Process., 2023." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 314, + 384, + 563, + 410 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 384, + 563, + 410 + ], + "spans": [ + { + "bbox": [ + 314, + 384, + 563, + 410 + ], + "type": "text", + "content": "[12] B. Siegel, \"Industrial anomaly detection: A comparison of unsupervised neural network architectures,\" IEEE Sens. Lett., vol. 4, no. 8, pp. 1-4, 2020." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 314, + 411, + 563, + 446 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 411, + 563, + 446 + ], + "spans": [ + { + "bbox": [ + 314, + 411, + 563, + 446 + ], + "type": "text", + "content": "[13] P. Bergmann, M. Fauser, D. Sattlegger, and C. Steger, \"MVTec AD: a comprehensive real-world dataset for unsupervised anomaly detection,\" in Proc. IEEE/CVF Conf. Comput. Vis. Pattern Recognit., 2019, pp. 9592-9600." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 314, + 447, + 563, + 474 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 447, + 563, + 474 + ], + "spans": [ + { + "bbox": [ + 314, + 447, + 563, + 474 + ], + "type": "text", + "content": "[14] S. Schmidl, P. Wenig, and T. Papenbrock, \"Anomaly detection in time series: a comprehensive evaluation,\" Proc. VLDB Endow., vol. 15, no. 9, pp. 1779-1797, 2022." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 314, + 474, + 563, + 502 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 474, + 563, + 502 + ], + "spans": [ + { + "bbox": [ + 314, + 474, + 563, + 502 + ], + "type": "text", + "content": "[15] S. Zhai, Y. Cheng, W. Lu, and Z. Zhang, \"Deep structured energy based models for anomaly detection,\" in Int. Conf. Mach. Learn. (ICML). PMLR, 2016, pp. 1100-1109." 
+ } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 314, + 502, + 563, + 538 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 502, + 563, + 538 + ], + "spans": [ + { + "bbox": [ + 314, + 502, + 563, + 538 + ], + "type": "text", + "content": "[16] H. Sarmadi and A. Karamodin, “A novel anomaly detection method based on adaptive Mahalanobis-squared distance and one-class kNN rule for structural health monitoring under environmental effects,” Mech. Syst. Signal Process., vol. 140, p. 106495, 2020." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 314, + 538, + 563, + 565 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 538, + 563, + 565 + ], + "spans": [ + { + "bbox": [ + 314, + 538, + 563, + 565 + ], + "type": "text", + "content": "[17] I. Syarif, A. Prugel-Bennett, and G. Wills, “Unsupervised clustering approach for network anomaly detection,” in Netw. Digit. Technol., Int. Conf., NDT 2012, Proc., Part I. Springer, 2012, pp. 135–145." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 314, + 566, + 563, + 592 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 566, + 563, + 592 + ], + "spans": [ + { + "bbox": [ + 314, + 566, + 563, + 592 + ], + "type": "text", + "content": "[18] D. Samariya and A. Thakkar, “A comprehensive survey of anomaly detection algorithms,” Ann. Data Sci., vol. 10, no. 3, pp. 829–850, 2023." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 314, + 593, + 563, + 619 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 593, + 563, + 619 + ], + "spans": [ + { + "bbox": [ + 314, + 593, + 563, + 619 + ], + "type": "text", + "content": "[19] G. Pang, C. Shen, L. Cao, and A. V. D. Hengel, “Deep learning for anomaly detection: A review,” ACM Comput. Surv., vol. 54, no. 2, pp. 1-38, 2021." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 314, + 620, + 563, + 638 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 620, + 563, + 638 + ], + "spans": [ + { + "bbox": [ + 314, + 620, + 563, + 638 + ], + "type": "text", + "content": "[20] L. Bergman, N. Cohen, and Y. Hoshen, \"Deep nearest neighbor anomaly detection,\" arXiv preprint arXiv:2002.10445, 2020." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 314, + 639, + 563, + 666 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 639, + 563, + 666 + ], + "spans": [ + { + "bbox": [ + 314, + 639, + 563, + 666 + ], + "type": "text", + "content": "[21] K. Leung and C. Leckie, \"Unsupervised anomaly detection in network intrusion detection using clusters,\" in Proc. 28th Australas. Conf. Comput. Sci., vol. 38, 2005, pp. 333-342." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 314, + 666, + 563, + 693 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 666, + 563, + 693 + ], + "spans": [ + { + "bbox": [ + 314, + 666, + 563, + 693 + ], + "type": "text", + "content": "[22] H. Ringberg, A. Soule, J. Rexford, and C. Diot, \"Sensitivity of PCA for traffic anomaly detection,\" in Proc. 2007 ACM SIGMETRICS Int. Conf. Meas. Model. Comput. Syst., 2007, pp. 109-120." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 314, + 693, + 563, + 720 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 693, + 563, + 720 + ], + "spans": [ + { + "bbox": [ + 314, + 693, + 563, + 720 + ], + "type": "text", + "content": "[23] D. Kwon, H. Kim, J. Kim, S. C. Suh, I. Kim, and K. J. 
Kim, “A survey of deep learning-based network anomaly detection,” Cluster Computing, vol. 22, pp. 949–961, 2019." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 314, + 720, + 563, + 748 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 720, + 563, + 748 + ], + "spans": [ + { + "bbox": [ + 314, + 720, + 563, + 748 + ], + "type": "text", + "content": "[24] A. Aldweesh, A. Derhab, and A. Z. Emam, \"Deep learning approaches for anomaly-based intrusion detection systems: A survey, taxonomy, and open issues,\" Knowl.-Based Syst., vol. 189, p. 105124, 2020." + } + ] + } + ], + "index": 32 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 47, + 23, + 261, + 33 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 23, + 261, + 33 + ], + "spans": [ + { + "bbox": [ + 47, + 23, + 261, + 33 + ], + "type": "text", + "content": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. X, XXX 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "bbox": [ + 50, + 57, + 301, + 747 + ], + "type": "list", + "angle": 0, + "index": 26, + "blocks": [ + { + "bbox": [ + 50, + 57, + 301, + 83 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 57, + 301, + 83 + ], + "spans": [ + { + "bbox": [ + 50, + 57, + 301, + 83 + ], + "type": "text", + "content": "[25] L. Li, J. Yan, H. Wang, and Y. Jin, \"Anomaly detection of time series with smoothness-inducing sequential variational auto-encoder,\" IEEE Trans. Neural Netw. Learn. Syst., vol. 32, no. 3, pp. 1177-1191, 2020." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 50, + 84, + 301, + 112 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 84, + 301, + 112 + ], + "spans": [ + { + "bbox": [ + 50, + 84, + 301, + 112 + ], + "type": "text", + "content": "[26] G. Harshvardhan, M. K. Gourisaria, M. Pandey, and S. S. Rautaray, \"A comprehensive survey and analysis of generative models in machine learning,\" Comput. Sci. Rev., vol. 38, p. 100285, 2020." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 51, + 113, + 301, + 131 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 113, + 301, + 131 + ], + "spans": [ + { + "bbox": [ + 51, + 113, + 301, + 131 + ], + "type": "text", + "content": "[27] B. Nachman and D. Shih, \"Anomaly detection with density estimation,\" Phys. Rev. D, vol. 101, no. 7, p. 075042, 2020." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 51, + 131, + 301, + 158 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 131, + 301, + 158 + ], + "spans": [ + { + "bbox": [ + 51, + 131, + 301, + 158 + ], + "type": "text", + "content": "[28] A. B. Nassif, M. A. Talib, Q. Nasir, and F. M. Dakalbab, \"Machine learning for anomaly detection: A systematic review,\" IEEE Access, vol. 9, pp. 78658-78700, 2021." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 51, + 159, + 301, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 159, + 301, + 194 + ], + "spans": [ + { + "bbox": [ + 51, + 159, + 301, + 194 + ], + "type": "text", + "content": "[29] X. Ma, J. Wu, S. Xue, J. Yang, C. Zhou, Q. Z. 
Sheng, H. Xiong, and L. Akoglu, “A comprehensive survey on graph anomaly detection with deep learning,” IEEE Trans. Knowl. Data Eng., vol. 35, no. 12, pp. 12012–12038, 2021." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 51, + 194, + 301, + 221 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 194, + 301, + 221 + ], + "spans": [ + { + "bbox": [ + 51, + 194, + 301, + 221 + ], + "type": "text", + "content": "[30] X. Xia, X. Pan, N. Li, X. He, L. Ma, X. Zhang, and N. Ding, “GAN-based anomaly detection: A review,” Neurocomputing, vol. 493, pp. 497-535, 2022." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 51, + 222, + 301, + 250 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 222, + 301, + 250 + ], + "spans": [ + { + "bbox": [ + 51, + 222, + 301, + 250 + ], + "type": "text", + "content": "[31] J. Lv, Y. Wang, and S. Chen, \"Adaptive multivariate time-series anomaly detection,\" Inf. Process. Manag., vol. 60, no. 4, p. 103383, 2023." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 51, + 250, + 301, + 278 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 250, + 301, + 278 + ], + "spans": [ + { + "bbox": [ + 51, + 250, + 301, + 278 + ], + "type": "text", + "content": "[32] M. Y. I. Basheer, A. M. Ali, N. H. A. Hamid, M. A. M. Ariffin, R. Osman, S. Nordin, and X. Gu, \"Autonomous anomaly detection for streaming data,\" Knowledge-Based Systems, vol. 284, p. 111235, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 51, + 278, + 301, + 314 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 278, + 301, + 314 + ], + "spans": [ + { + "bbox": [ + 51, + 278, + 301, + 314 + ], + "type": "text", + "content": "[33] X. Peng, H. Li, F. Yuan, S. G. Razul, Z. Chen, and Z. Lin, \"An extreme learning machine for unsupervised online anomaly detection in multivariate time series,\" Neurocomputing, vol. 501, pp. 596-608, 2022." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 51, + 315, + 301, + 351 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 315, + 301, + 351 + ], + "spans": [ + { + "bbox": [ + 51, + 315, + 301, + 351 + ], + "type": "text", + "content": "[34] Y. Choi, H. Lim, H. Choi, and I.-J. Kim, \"GAN-based anomaly detection and localization of multivariate time series data for power plant,\" in Proc. 2020 IEEE Int. Conf. Big Data Smart Comput. (BigComp). IEEE, 2020, pp. 71-74." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 51, + 351, + 301, + 378 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 351, + 301, + 378 + ], + "spans": [ + { + "bbox": [ + 51, + 351, + 301, + 378 + ], + "type": "text", + "content": "[35] H.-T. Duong, V.-T. Le, and V. T. Hoang, \"Deep learning-based anomaly detection in video surveillance: a survey,\" Sensors, vol. 23, no. 11, p. 5024, 2023." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 51, + 379, + 301, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 379, + 301, + 407 + ], + "spans": [ + { + "bbox": [ + 51, + 379, + 301, + 407 + ], + "type": "text", + "content": "[36] S. Thudumu, P. Branch, J. Jin, and J. Singh, \"A comprehensive survey of anomaly detection techniques for high dimensional big data,\" Journal of Big Data, vol. 7, pp. 1-30, 2020." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 51, + 407, + 301, + 434 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 407, + 301, + 434 + ], + "spans": [ + { + "bbox": [ + 51, + 407, + 301, + 434 + ], + "type": "text", + "content": "[37] I. Souiden, M. N. Omri, and Z. Brahmi, “A survey of outlier detection in high dimensional data streams,” Comput. Sci. Rev., vol. 44, p. 100463, 2022." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 51, + 434, + 301, + 462 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 434, + 301, + 462 + ], + "spans": [ + { + "bbox": [ + 51, + 434, + 301, + 462 + ], + "type": "text", + "content": "[38] Q. Ding and E. D. Kolaczyk, “A compressed pca subspace method for anomaly detection in high-dimensional data,” IEEE Trans. Inf. Theory, vol. 59, no. 11, pp. 7419–7433, 2013." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 51, + 462, + 301, + 490 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 462, + 301, + 490 + ], + "spans": [ + { + "bbox": [ + 51, + 462, + 301, + 490 + ], + "type": "text", + "content": "[39] M. Sakurada and T. Yairi, \"Anomaly detection using autoencoders with nonlinear dimensionality reduction,\" in Proc. MLSDA 2014 2nd Workshop Mach. Learn. Sensory Data Anal., 2014, pp. 4-11." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 51, + 490, + 301, + 525 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 490, + 301, + 525 + ], + "spans": [ + { + "bbox": [ + 51, + 490, + 301, + 525 + ], + "type": "text", + "content": "[40] T. Cheng and B. Wang, \"Total variation and sparsity regularized decomposition model with union dictionary for hyperspectral anomaly detection,\" IEEE Trans. Geosci. Remote Sens., vol. 59, no. 2, pp. 1472-1486, 2020." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 51, + 526, + 301, + 554 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 526, + 301, + 554 + ], + "spans": [ + { + "bbox": [ + 51, + 526, + 301, + 554 + ], + "type": "text", + "content": "[41] L. Li, W. Li, Q. Du, and R. Tao, \"Low-rank and sparse decomposition with mixture of gaussian for hyperspectral anomaly detection,\" IEEE Trans. Cybern., vol. 51, no. 9, pp. 4363-4372, 2021." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 51, + 554, + 301, + 582 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 554, + 301, + 582 + ], + "spans": [ + { + "bbox": [ + 51, + 554, + 301, + 582 + ], + "type": "text", + "content": "[42] S. Han and S. S. Woo, “Learning sparse latent graph representations for anomaly detection in multivariate time series,” in Proc. 28th ACM SIGKDD Conf. Knowl. Discov. Data Min., 2022, pp. 2977–2986." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 51, + 582, + 301, + 609 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 582, + 301, + 609 + ], + "spans": [ + { + "bbox": [ + 51, + 582, + 301, + 609 + ], + "type": "text", + "content": "[43] X. Ma and W. Shi, “Aesmote: Adversarial reinforcement learning with smote for anomaly detection,” IEEE Trans. Netw. Sci. Eng., vol. 8, no. 2, pp. 943–956, 2021." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 51, + 609, + 301, + 636 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 609, + 301, + 636 + ], + "spans": [ + { + "bbox": [ + 51, + 609, + 301, + 636 + ], + "type": "text", + "content": "[44] M. Kim, E. Ou, P.-L. Loh, T. Allen, R. Agasie, and K. 
Liu, \"Rnn-based online anomaly detection in nuclear reactors for highly imbalanced datasets with uncertainty,\" Nucl. Eng. Des., vol. 364, p. 110699, 2020." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 51, + 637, + 301, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 637, + 301, + 665 + ], + "spans": [ + { + "bbox": [ + 51, + 637, + 301, + 665 + ], + "type": "text", + "content": "[45] G. Dlamini and M. Fahim, “Dgm: a data generative model to improve minority class presence in anomaly detection domain,” Neural Comput. Appl., vol. 33, pp. 13635–13646, 2021." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 51, + 665, + 301, + 692 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 665, + 301, + 692 + ], + "spans": [ + { + "bbox": [ + 51, + 665, + 301, + 692 + ], + "type": "text", + "content": "[46] S. Han, X. Hu, H. Huang, M. Jiang, and Y. Zhao, \"Adbench: Anomaly detection benchmark,\" Adv. Neural Inf. Process. Syst., vol. 35, pp. 32-142-32-159, 2022." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 51, + 692, + 301, + 720 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 692, + 301, + 720 + ], + "spans": [ + { + "bbox": [ + 51, + 692, + 301, + 720 + ], + "type": "text", + "content": "[47] Y. Zhang, Y. Chen, J. Wang, and Z. Pan, \"Unsupervised deep anomaly detection for multi-sensor time-series signals,\" IEEE Trans. Knowl. Data Eng., vol. 35, no. 2, pp. 2118-2132, 2023." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 51, + 720, + 301, + 747 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 720, + 301, + 747 + ], + "spans": [ + { + "bbox": [ + 51, + 720, + 301, + 747 + ], + "type": "text", + "content": "[48] D. Chen, L. Yue, X. Chang, M. Xu, and T. Jia, \"Nm-gan: Noise-modulated generative adversarial network for video anomaly detection,\" Pattern Recognition, vol. 116, p. 107969, 2021." + } + ] + } + ], + "index": 25 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 315, + 56, + 563, + 748 + ], + "type": "list", + "angle": 0, + "index": 50, + "blocks": [ + { + "bbox": [ + 315, + 56, + 563, + 83 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 56, + 563, + 83 + ], + "spans": [ + { + "bbox": [ + 315, + 56, + 563, + 83 + ], + "type": "text", + "content": "[49] M. U. Hassan, M. H. Rehmani, and J. Chen, \"Anomaly detection in blockchain networks: A comprehensive survey,\" IEEE Commun. Surv. Tutor., vol. 25, no. 1, pp. 289-318, 2022." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 315, + 84, + 563, + 120 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 84, + 563, + 120 + ], + "spans": [ + { + "bbox": [ + 315, + 84, + 563, + 120 + ], + "type": "text", + "content": "[50] Y. Liu, S. Garg, J. Nie, Y. Zhang, Z. Xiong, J. Kang, and M. S. Hossain, \"Deep anomaly detection for time-series data in industrial IoT: A communication-efficient on-device federated learning approach,\" IEEE Internet Things J., vol. 8, no. 8, pp. 6348-6358, 2020." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 315, + 120, + 563, + 156 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 120, + 563, + 156 + ], + "spans": [ + { + "bbox": [ + 315, + 120, + 563, + 156 + ], + "type": "text", + "content": "[51] M. J. Idrissi, H. Alami, A. El Mahdaouy, A. El Mekki, S. Oualil, Z. Yartaoui, and I. 
Berrada, “Fed-anids: Federated learning for anomaly-based network intrusion detection systems,” Expert Syst. Appl., vol. 234, p. 121000, 2023." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 315, + 156, + 563, + 193 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 156, + 563, + 193 + ], + "spans": [ + { + "bbox": [ + 315, + 156, + 563, + 193 + ], + "type": "text", + "content": "[52] L. Cui, Y. Qu, G. Xie, D. Zeng, R. Li, S. Shen, and S. Yu, \"Security and privacy-enhanced federated learning for anomaly detection in IoT infrastructures,\" IEEE Trans. Ind. Inform., vol. 18, no. 5, pp. 3492-3500, 2022." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 315, + 193, + 563, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 193, + 563, + 228 + ], + "spans": [ + { + "bbox": [ + 315, + 193, + 563, + 228 + ], + "type": "text", + "content": "[53] X. Wang, J. Liu, T. Qiu, C. Mu, C. Chen, and P. Zhou, \"A real-time collision prediction mechanism with deep learning for intelligent transportation system,\" IEEE Trans. Veh. Technol., vol. 69, no. 9, pp. 9497-9508, 2020." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 315, + 228, + 563, + 257 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 228, + 563, + 257 + ], + "spans": [ + { + "bbox": [ + 315, + 228, + 563, + 257 + ], + "type": "text", + "content": "[54] G. Li, T.-H. Nguyen, and J. J. Jung, \"Traffic incident detection based on dynamic graph embedding in vehicular edge computing,\" Appl. Sci., vol. 11, no. 13, p. 5861, 2021." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 315, + 257, + 563, + 284 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 257, + 563, + 284 + ], + "spans": [ + { + "bbox": [ + 315, + 257, + 563, + 284 + ], + "type": "text", + "content": "[55] G. Li and J. J. Jung, \"Deep learning for anomaly detection in multivariate time series: Approaches, applications, and challenges,\" Inf. Fusion, vol. 91, pp. 93-102, 2023." + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 315, + 284, + 563, + 311 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 284, + 563, + 311 + ], + "spans": [ + { + "bbox": [ + 315, + 284, + 563, + 311 + ], + "type": "text", + "content": "[56] C. Zhao, X. Chang, T. Xie, H. Fujita, and J. Wu, \"Unsupervised anomaly detection based method of risk evaluation for road traffic accident,\" Appl. Intell., vol. 53, no. 1, pp. 369-384, 2023." + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 315, + 311, + 563, + 338 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 311, + 563, + 338 + ], + "spans": [ + { + "bbox": [ + 315, + 311, + 563, + 338 + ], + "type": "text", + "content": "[57] S. Li, A. Pandey, B. Hooi, C. Faloutsos, and L. Pileggi, \"Dynamic graph-based anomaly detection in the electrical grid,\" IEEE Trans. Power Syst., vol. 37, no. 5, pp. 3408-3422, 2022." + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 315, + 339, + 563, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 339, + 563, + 365 + ], + "spans": [ + { + "bbox": [ + 315, + 339, + 563, + 365 + ], + "type": "text", + "content": "[58] X. Wang and S.-H. Ahn, “Real-time prediction and anomaly detection of electrical load in a residential community,” Appl. Energy, vol. 259, p. 114145, 2020." 
+ } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 315, + 365, + 563, + 402 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 365, + 563, + 402 + ], + "spans": [ + { + "bbox": [ + 315, + 365, + 563, + 402 + ], + "type": "text", + "content": "[59] I. Siniosoglou, P. Radoglou-Grammatikis, G. Efstathopoulos, P. Fouliras, and P. Sarigiannidis, “A unified deep learning anomaly detection and classification approach for smart grid environments,” IEEE Trans. Netw. Serv. Manag., vol. 18, no. 2, pp. 1137-1151, 2021." + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 315, + 402, + 563, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 402, + 563, + 430 + ], + "spans": [ + { + "bbox": [ + 315, + 402, + 563, + 430 + ], + "type": "text", + "content": "[60] T. Fernando, H. Gammulle, S. Denman, S. Sridharan, and C. Fookes, \"Deep learning for medical anomaly detection-a survey,\" ACM Comput. Surv., vol. 54, no. 7, pp. 1-37, 2021." + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 315, + 430, + 563, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 430, + 563, + 456 + ], + "spans": [ + { + "bbox": [ + 315, + 430, + 563, + 456 + ], + "type": "text", + "content": "[61] E. Šabić, D. Keeley, B. Henderson, and S. Nannemann, “Healthcare and anomaly detection: using machine learning to predict anomalies in heart rate data,” *Ai & Society*, vol. 36, no. 1, pp. 149–158, 2021." + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 315, + 456, + 563, + 483 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 456, + 563, + 483 + ], + "spans": [ + { + "bbox": [ + 315, + 456, + 563, + 483 + ], + "type": "text", + "content": "[62] K. G. Al-Hashedi and P. Magalingam, “Financial fraud detection applying data mining techniques: A comprehensive review from 2009 to 2019,” Comput. Sci. Rev., vol. 40, p. 100402, 2021." + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 315, + 483, + 563, + 511 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 483, + 563, + 511 + ], + "spans": [ + { + "bbox": [ + 315, + 483, + 563, + 511 + ], + "type": "text", + "content": "[63] W. Hilal, S. A. Gadsden, and J. Yawney, \"Financial fraud: a review of anomaly detection techniques and recent advances,\" Expert Syst. Appl., vol. 193, p. 116429, 2022." + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 315, + 511, + 563, + 538 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 511, + 563, + 538 + ], + "spans": [ + { + "bbox": [ + 315, + 511, + 563, + 538 + ], + "type": "text", + "content": "[64] H. Fujita, A. Gaeta, V. Loia, and F. Orciuoli, “Resilience analysis of critical infrastructures: A cognitive approach based on granular computing,” IEEE Trans. Cybern., vol. 49, no. 5, pp. 1835–1848, 2019." + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 315, + 538, + 563, + 566 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 538, + 563, + 566 + ], + "spans": [ + { + "bbox": [ + 315, + 538, + 563, + 566 + ], + "type": "text", + "content": "[65] V. K. Singh and M. Govindarasu, “A cyber-physical anomaly detection for wide-area protection using machine learning,” IEEE Trans. Smart Grid, vol. 12, no. 4, pp. 3514–3526, 2021." 
+ } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 315, + 566, + 563, + 601 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 566, + 563, + 601 + ], + "spans": [ + { + "bbox": [ + 315, + 566, + 563, + 601 + ], + "type": "text", + "content": "[66] S. M. Nagarajan, G. G. Deverajan, A. K. Bashir, R. P. Mahapatra, and M. S. Al-Numay, \"TADF-cps: Intelligent anomaly detection framework towards cyber physical systems,\" Comput. Commun., vol. 188, pp. 81–89, 2022." + } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 315, + 601, + 563, + 638 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 601, + 563, + 638 + ], + "spans": [ + { + "bbox": [ + 315, + 601, + 563, + 638 + ], + "type": "text", + "content": "[67] T. Nakao, S. Hanaoka, Y. Nomura, M. Murata, T. Takenaga, S. Miki, T. Watadani, T. Yoshikawa, N. Hayashi, and O. Abe, \"Unsupervised deep anomaly detection in chest radiographs,\" J. Digit. Imaging, vol. 34, pp. 418-427, 2021." + } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 315, + 638, + 563, + 674 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 638, + 563, + 674 + ], + "spans": [ + { + "bbox": [ + 315, + 638, + 563, + 674 + ], + "type": "text", + "content": "[68] W. H. Pinaya, P.-D. Tudosiu, R. Gray, G. Rees, P. Nachev, S. Ourselin, and M. J. Cardoso, \"Unsupervised brain imaging 3d anomaly detection and segmentation with transformers,\" Med. Image Anal., vol. 79, p. 102475, 2022." + } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 315, + 674, + 563, + 701 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 674, + 563, + 701 + ], + "spans": [ + { + "bbox": [ + 315, + 674, + 563, + 701 + ], + "type": "text", + "content": "[69] L. Chen, Z. You, N. Zhang, J. Xi, and X. Le, “Utrad: Anomaly detection and localization with u-transformer,” Neural Networks, vol. 147, pp. 53–62, 2022." + } + ] + } + ], + "index": 47 + }, + { + "bbox": [ + 315, + 701, + 563, + 729 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 701, + 563, + 729 + ], + "spans": [ + { + "bbox": [ + 315, + 701, + 563, + 729 + ], + "type": "text", + "content": "[70] N. Shvetsova, B. Bakker, I. Fedulova, H. Schulz, and D. V. Dylov, \"Anomaly detection in medical imaging with deep perceptual autoencoders,\" IEEE Access, vol. 9, pp. 118571-118583, 2021." + } + ] + } + ], + "index": 48 + }, + { + "bbox": [ + 315, + 729, + 563, + 748 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 729, + 563, + 748 + ], + "spans": [ + { + "bbox": [ + 315, + 729, + 563, + 748 + ], + "type": "text", + "content": "[71] R. L. Draelos, D. Dov, M. A. Mazurowski, J. Y. Lo, R. Henao, G. D. Rubin, and L. Carin, \"Machine-learning-based multiple abnormality" + } + ] + } + ], + "index": 49 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 47, + 23, + 261, + 33 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 23, + 261, + 33 + ], + "spans": [ + { + "bbox": [ + 47, + 23, + 261, + 33 + ], + "type": "text", + "content": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. 
X, XXX 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 554, + 25, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 554, + 25, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 554, + 25, + 563, + 32 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "bbox": [ + 51, + 57, + 301, + 747 + ], + "type": "list", + "angle": 0, + "index": 25, + "blocks": [ + { + "bbox": [ + 68, + 57, + 299, + 75 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 57, + 299, + 75 + ], + "spans": [ + { + "bbox": [ + 68, + 57, + 299, + 75 + ], + "type": "text", + "content": "prediction with large-scale chest computed tomography volumes,\" Med. Image Anal., vol. 67, p. 101857, 2021." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 51, + 76, + 301, + 102 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 76, + 301, + 102 + ], + "spans": [ + { + "bbox": [ + 51, + 76, + 301, + 102 + ], + "type": "text", + "content": "[72] N. Shvetsova, B. Bakker, I. Fedulova, H. Schulz, and D. V. Dylov, \"Anomaly detection in medical imaging with deep perceptual autoencoders,\" IEEE Access, vol. 9, pp. 118571-118583, 2021." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 52, + 103, + 301, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 103, + 301, + 139 + ], + "spans": [ + { + "bbox": [ + 52, + 103, + 301, + 139 + ], + "type": "text", + "content": "[73] H. Zhao, Y. Li, N. He, K. Ma, L. Fang, H. Li, and Y. Zheng, \"Anomaly detection for medical images using self-supervised and translation-consistent features,\" IEEE Trans. Med. Imaging, vol. 40, no. 12, pp. 3641-3651, 2021." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 52, + 140, + 301, + 167 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 140, + 301, + 167 + ], + "spans": [ + { + "bbox": [ + 52, + 140, + 301, + 167 + ], + "type": "text", + "content": "[74] R. Nayak, U. C. Pati, and S. K. Das, “A comprehensive review on deep learning-based methods for video anomaly detection,” Image Vis. Comput., vol. 106, p. 104078, 2021." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 52, + 168, + 301, + 195 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 168, + 301, + 195 + ], + "spans": [ + { + "bbox": [ + 52, + 168, + 301, + 195 + ], + "type": "text", + "content": "[75] Y. Wang, T. Liu, J. Zhou, and J. Guan, \"Video anomaly detection based on spatio-temporal relationships among objects,\" Neurocomputing, vol. 532, pp. 141-151, 2023." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 51, + 196, + 301, + 222 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 196, + 301, + 222 + ], + "spans": [ + { + "bbox": [ + 51, + 196, + 301, + 222 + ], + "type": "text", + "content": "[76] N. Li, F. Chang, and C. Liu, \"Spatial-temporal cascade autoencoder for video anomaly detection in crowded scenes,\" IEEE Trans. Multimed., vol. 23, pp. 203-215, 2020." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 51, + 223, + 301, + 251 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 223, + 301, + 251 + ], + "spans": [ + { + "bbox": [ + 51, + 223, + 301, + 251 + ], + "type": "text", + "content": "[77] D. Chen, P. Wang, L. Yue, Y. Zhang, and T. Jia, “Anomaly detection in surveillance video based on bidirectional prediction,” Image Vis. 
Comput., vol. 98, p. 103915, 2020." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 51, + 251, + 301, + 278 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 251, + 301, + 278 + ], + "spans": [ + { + "bbox": [ + 51, + 251, + 301, + 278 + ], + "type": "text", + "content": "[78] M. H. Bhuyan, D. K. Bhattacharyya, and J. K. Kalita, “Network anomaly detection: methods, systems and tools,” IEEE Commun. Surv. Tutor., vol. 16, no. 1, pp. 303-336, 2013." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 51, + 279, + 301, + 314 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 279, + 301, + 314 + ], + "spans": [ + { + "bbox": [ + 51, + 279, + 301, + 314 + ], + "type": "text", + "content": "[79] S. Liu, B. Zhou, Q. Ding, B. Hooi, Z. Zhang, H. Shen, and X. Cheng, \"Time series anomaly detection with adversarial reconstruction networks,\" IEEE Trans. Knowl. Data Eng., vol. 35, no. 4, pp. 4293-4306, 2022." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 51, + 315, + 301, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 315, + 301, + 342 + ], + "spans": [ + { + "bbox": [ + 51, + 315, + 301, + 342 + ], + "type": "text", + "content": "[80] H. Cao, C. Tan, Z. Gao, Y. Xu, G. Chen, P.-A. Heng, and S. Z. Li, “A survey on generative diffusion models,” IEEE Transactions on Knowledge and Data Engineering, 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 51, + 343, + 301, + 370 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 343, + 301, + 370 + ], + "spans": [ + { + "bbox": [ + 51, + 343, + 301, + 370 + ], + "type": "text", + "content": "[81] I. Goodfellow, J. Pouget-Abadie, M. Mirza, B. Xu, D. Warde-Farley, S. Ozair, A. Courville, and Y. Bengio, \"Generative adversarial networks,\" Commun. ACM, vol. 63, no. 11, pp. 139–144, 2020." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 51, + 371, + 301, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 371, + 301, + 407 + ], + "spans": [ + { + "bbox": [ + 51, + 371, + 301, + 407 + ], + "type": "text", + "content": "[82] L. Yang, Z. Zhang, Y. Song, S. Hong, R. Xu, Y. Zhao, W. Zhang, B. Cui, and M.-H. Yang, \"Diffusion models: A comprehensive survey of methods and applications,\" ACM Comput. Surv., vol. 56, no. 4, pp. 1-39, 2023." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 51, + 407, + 301, + 443 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 407, + 301, + 443 + ], + "spans": [ + { + "bbox": [ + 51, + 407, + 301, + 443 + ], + "type": "text", + "content": "[83] S. Bond-Taylor, A. Leach, Y. Long, and C. G. Willcocks, “Deep generative modelling: A comparative review of vaes, gans, normalizing flows, energy-based and autoregressive models,” IEEE Trans. Pattern Anal. Mach. Intell., vol. 44, no. 11, pp. 7327-7347, 2021." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 51, + 444, + 301, + 471 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 444, + 301, + 471 + ], + "spans": [ + { + "bbox": [ + 51, + 444, + 301, + 471 + ], + "type": "text", + "content": "[84] S. Sheynin, S. Benaim, and L. Wolf, “A hierarchical transformation-discriminating generative model for few shot anomaly detection,” in Proc. IEEE/CVF Int. Conf. Comput. Vis., 2021, pp. 8495-8504."
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 51, + 472, + 301, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 472, + 301, + 498 + ], + "spans": [ + { + "bbox": [ + 51, + 472, + 301, + 498 + ], + "type": "text", + "content": "[85] W. Lim, K. Y. S. Chek, L. B. Theng, and C. T. C. Lin, “Future of generative adversarial networks (gan) for anomaly detection in network security: A review,” Comput. Secur., p. 103733, 2024." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 51, + 499, + 301, + 526 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 499, + 301, + 526 + ], + "spans": [ + { + "bbox": [ + 51, + 499, + 301, + 526 + ], + "type": "text", + "content": "[86] X. Du, J. Chen, J. Yu, S. Li, and Q. Tan, \"Generative adversarial nets for unsupervised outlier detection,\" Expert Syst. Appl., vol. 236, p. 121161, 2024." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 51, + 526, + 301, + 554 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 526, + 301, + 554 + ], + "spans": [ + { + "bbox": [ + 51, + 526, + 301, + 554 + ], + "type": "text", + "content": "[87] J. Wu, Z. Zhao, C. Sun, R. Yan, and X. Chen, “Fault-attention generative probabilistic adversarial autoencoder for machine anomaly detection,” IEEE Trans. Ind. Inf., vol. 16, no. 12, pp. 7479–7488, 2020." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 51, + 554, + 301, + 582 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 554, + 301, + 582 + ], + "spans": [ + { + "bbox": [ + 51, + 554, + 301, + 582 + ], + "type": "text", + "content": "[88] F. Dong, Y. Zhang, and X. Nie, \"Dual discriminator generative adversarial network for video anomaly detection,\" IEEE Access, vol. 8, pp. 88170-88176, 2020." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 51, + 582, + 301, + 609 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 582, + 301, + 609 + ], + "spans": [ + { + "bbox": [ + 51, + 582, + 301, + 609 + ], + "type": "text", + "content": "[89] D. Guo, Z. Liu, and R. Li, \"Regraphgan: A graph generative adversarial network model for dynamic network anomaly detection,\" Neural Networks, vol. 166, pp. 273-285, 2023." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 51, + 610, + 301, + 637 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 610, + 301, + 637 + ], + "spans": [ + { + "bbox": [ + 51, + 610, + 301, + 637 + ], + "type": "text", + "content": "[90] Y. Liu, Z. Li, C. Zhou, Y. Jiang, J. Sun, M. Wang, and X. He, \"Generative adversarial active learning for unsupervised outlier detection,\" IEEE Trans. Knowl. Data Eng., vol. 32, no. 8, pp. 1517-1528, 2019." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 51, + 638, + 301, + 682 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 638, + 301, + 682 + ], + "spans": [ + { + "bbox": [ + 51, + 638, + 301, + 682 + ], + "type": "text", + "content": "[91] C. Liu, Z. Kong, S. Babu, C. Joslin, and J. Ferguson, \"An integrated manifold learning approach for high-dimensional data feature extractions and its applications to online process monitoring of additive manufacturing,\" IISE Transactions, vol. 53, no. 11, pp. 1215-1230, 2021." 
+ } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 51, + 683, + 301, + 719 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 683, + 301, + 719 + ], + "spans": [ + { + "bbox": [ + 51, + 683, + 301, + 719 + ], + "type": "text", + "content": "[92] J. Miao, H. Tao, H. Xie, J. Sun, and J. Cao, \"Reconstruction-based anomaly detection for multivariate time series using contrastive generative adversarial networks,\" Inf. Process. Manag., vol. 61, no. 1, p. 103569, 2024." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 51, + 719, + 301, + 747 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 719, + 301, + 747 + ], + "spans": [ + { + "bbox": [ + 51, + 719, + 301, + 747 + ], + "type": "text", + "content": "[93] R. Liu, W. Liu, Z. Zheng, L. Wang, L. Mao, Q. Qiu, and G. Ling, \"Anomaly-gan: A data augmentation method for train surface anomaly detection,\" Expert Syst. Appl., vol. 228, p. 120284, 2023." + } + ] + } + ], + "index": 24 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 310, + 56, + 563, + 747 + ], + "type": "list", + "angle": 0, + "index": 50, + "blocks": [ + { + "bbox": [ + 315, + 56, + 563, + 92 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 56, + 563, + 92 + ], + "spans": [ + { + "bbox": [ + 315, + 56, + 563, + 92 + ], + "type": "text", + "content": "[94] Y. Li, Z. Shi, C. Liu, W. Tian, Z. Kong, and C. B. Williams, \"Augmented time regularized generative adversarial network (atr-gan) for data augmentation in online process anomaly detection,\" IEEE Trans. Autom. Sci. Eng., vol. 19, no. 4, pp. 3338-3355, 2021." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 314, + 93, + 563, + 120 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 93, + 563, + 120 + ], + "spans": [ + { + "bbox": [ + 314, + 93, + 563, + 120 + ], + "type": "text", + "content": "[95] L. Zhang, W. Bai, X. Xie, L. Chen, and P. Dong, “Tmanomaly: Time-series mutual adversarial networks for industrial anomaly detection,” IEEE Trans. Ind. Inform., 2023." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 314, + 121, + 563, + 156 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 121, + 563, + 156 + ], + "spans": [ + { + "bbox": [ + 314, + 121, + 563, + 156 + ], + "type": "text", + "content": "[96] B. Du, X. Sun, J. Ye, K. Cheng, J. Wang, and L. Sun, \"Gan-based anomaly detection for multivariate time series using polluted training set,\" IEEE Trans. Knowl. Data Eng., vol. 35, no. 12, pp. 12 208-12 219, 2021." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 314, + 156, + 563, + 184 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 156, + 563, + 184 + ], + "spans": [ + { + "bbox": [ + 314, + 156, + 563, + 184 + ], + "type": "text", + "content": "[97] G. Fan, Y. Ma, X. Mei, F. Fan, J. Huang, and J. Ma, “Hyperspectral anomaly detection with robust graph autoencoders,” IEEE Trans. Geosci. Remote Sens., vol. 60, pp. 1-14, 2021." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 314, + 185, + 563, + 219 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 185, + 563, + 219 + ], + "spans": [ + { + "bbox": [ + 314, + 185, + 563, + 219 + ], + "type": "text", + "content": "[98] S. Wang, X. Wang, L. Zhang, and Y. Zhong, \"Auto-ad: Autonomous hyperspectral anomaly detection network based on fully convolutional autoencoder,\" IEEE Trans. Geosci. Remote Sens., vol. 60, pp. 1-14, 2021." 
+ } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 314, + 220, + 563, + 247 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 220, + 563, + 247 + ], + "spans": [ + { + "bbox": [ + 314, + 220, + 563, + 247 + ], + "type": "text", + "content": "[99] H. Liu, X. Su, X. Shen, and X. Zhou, \"Msnet: Self-supervised multiscale network with enhanced separation training for hyperspectral anomaly detection,\" IEEE Trans. Geosci. Remote Sens., 2024." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 310, + 248, + 563, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 248, + 563, + 274 + ], + "spans": [ + { + "bbox": [ + 310, + 248, + 563, + 274 + ], + "type": "text", + "content": "[100] X. Lin, Z. Li, H. Fan, Y. Fu, and X. Chen, “Exploiting negative correlation for unsupervised anomaly detection in contaminated time series,” Expert Syst. Appl., p. 123535, 2024." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 310, + 275, + 563, + 310 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 275, + 563, + 310 + ], + "spans": [ + { + "bbox": [ + 310, + 275, + 563, + 310 + ], + "type": "text", + "content": "[101] C. Huang, Z. Yang, J. Wen, Y. Xu, Q. Jiang, J. Yang, and Y. Wang, \"Self-supervision-augmented deep autoencoder for unsupervised visual anomaly detection,\" IEEE Trans. Cybern., vol. 52, no. 12, pp. 13834-13847, 2021." + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 310, + 311, + 563, + 338 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 311, + 563, + 338 + ], + "spans": [ + { + "bbox": [ + 310, + 311, + 563, + 338 + ], + "type": "text", + "content": "[102] C. Yin, S. Zhang, J. Wang, and N. N. Xiong, \"Anomaly detection based on convolutional recurrent autoencoder for IoT time series,\" IEEE Trans. Syst. Man Cybern.: Syst., vol. 52, no. 1, pp. 112-122, 2020." + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 310, + 339, + 563, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 339, + 563, + 365 + ], + "spans": [ + { + "bbox": [ + 310, + 339, + 563, + 365 + ], + "type": "text", + "content": "[103] W. Zhang, C. Zhang, and F. Tsung, “Grelen: Multivariate time series anomaly detection from the perspective of graph relational learning,” in IJCAI, 2022, pp. 2390–2397." + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 310, + 366, + 563, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 366, + 563, + 392 + ], + "spans": [ + { + "bbox": [ + 310, + 366, + 563, + 392 + ], + "type": "text", + "content": "[104] X. Zhou, Y. Hu, W. Liang, J. Ma, and Q. Jin, \"Variational lstm enhanced anomaly detection for industrial big data,\" IEEE Trans. Ind. Inform., vol. 17, no. 5, pp. 3469-3477, 2020." + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 310, + 392, + 563, + 411 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 392, + 563, + 411 + ], + "spans": [ + { + "bbox": [ + 310, + 392, + 563, + 411 + ], + "type": "text", + "content": "[105] A. Makhzani, J. Shlens, N. Jaitly, I. Goodfellow, and B. Frey, \"Adversarial autoencoders,\" arXiv preprint arXiv:1511.05644, 2015." + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 310, + 411, + 563, + 437 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 411, + 563, + 437 + ], + "spans": [ + { + "bbox": [ + 310, + 411, + 563, + 437 + ], + "type": "text", + "content": "[106] Q. Su, B. Tian, H. Wan, and J. 
Yin, \"Anomaly detection under contaminated data with contamination-immune bidirectional gans,\" IEEE Trans. Knowl. Data Eng., 2024." + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 310, + 438, + 563, + 465 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 438, + 563, + 465 + ], + "spans": [ + { + "bbox": [ + 310, + 438, + 563, + 465 + ], + "type": "text", + "content": "[107] J. Yu, X. Gao, F. Zhai, B. Li, B. Xue, S. Fu, L. Chen, and Z. Meng, \"An adversarial contrastive autoencoder for robust multivariate time series anomaly detection,\" Expert Syst. Appl., vol. 245, p. 123010, 2024." + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 310, + 466, + 563, + 484 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 466, + 563, + 484 + ], + "spans": [ + { + "bbox": [ + 310, + 466, + 563, + 484 + ], + "type": "text", + "content": "[108] J. Ho, A. Jain, and P. Abbeel, “Denoising diffusion probabilistic models,” Adv. Neural Inf. Process. Syst., vol. 33, pp. 6840–6851, 2020." + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 310, + 485, + 563, + 511 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 485, + 563, + 511 + ], + "spans": [ + { + "bbox": [ + 310, + 485, + 563, + 511 + ], + "type": "text", + "content": "[109] J. Wolleb, F. Bieder, R. Sandkühler, and P. C. Cattin, \"Diffusion models for medical anomaly detection,\" in Int. Conf. Med. Image Comput. Comput.-Assist. Interv. (MICCAI). Springer, 2022, pp. 35-45." + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 310, + 512, + 563, + 538 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 512, + 563, + 538 + ], + "spans": [ + { + "bbox": [ + 310, + 512, + 563, + 538 + ], + "type": "text", + "content": "[110] X. Zhang, N. Li, J. Li, T. Dai, Y. Jiang, and S.-T. Xia, \"Unsupervised surface anomaly detection with diffusion probabilistic model,\" in Proc. IEEE/CVF Int. Conf. Comput. Vis., 2023, pp. 6782-6791." + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 310, + 539, + 563, + 565 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 539, + 563, + 565 + ], + "spans": [ + { + "bbox": [ + 310, + 539, + 563, + 565 + ], + "type": "text", + "content": "[111] S. Li, J. Yu, Y. Lu, G. Yang, X. Du, and S. Liu, \"Self-supervised enhanced denoising diffusion for anomaly detection,\" Inf. Sci., vol. 669, p. 120612, 2024." + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 310, + 566, + 563, + 593 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 566, + 563, + 593 + ], + "spans": [ + { + "bbox": [ + 310, + 566, + 563, + 593 + ], + "type": "text", + "content": "[112] J. Zeng, X. Liu, and Z. Li, \"Radio anomaly detection based on improved denoising diffusion probabilistic models,\" IEEE Commun. Lett., 2023." + } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 310, + 593, + 563, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 593, + 563, + 620 + ], + "spans": [ + { + "bbox": [ + 310, + 593, + 563, + 620 + ], + "type": "text", + "content": "[113] X. Li, C. Xiao, Z. Feng, S. Pang, W. Tai, and F. Zhou, \"Controlled graph neural networks with denoising diffusion for anomaly detection,\" Expert Syst. Appl., vol. 237, p. 121533, 2024." 
+ } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 310, + 621, + 563, + 647 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 621, + 563, + 647 + ], + "spans": [ + { + "bbox": [ + 310, + 621, + 563, + 647 + ], + "type": "text", + "content": "[114] C. Li, G. Feng, Y. Li, R. Liu, Q. Miao, and L. Chang, “Diffstad: Denoising diffusion probabilistic models for vehicle trajectory anomaly detection,” Knowledge-Based Systems, vol. 286, p. 111387, 2024." + } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 310, + 648, + 563, + 683 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 648, + 563, + 683 + ], + "spans": [ + { + "bbox": [ + 310, + 648, + 563, + 683 + ], + "type": "text", + "content": "[115] J. Pei, J. Wang, D. Shi, and P. Wang, \"Detection and imputation-based two-stage denoising diffusion power system measurement recovery under cyber-physical uncertainties,\" IEEE Trans. Smart Grid, vol. 15, no. 6, pp. 5965-5980, 2024." + } + ] + } + ], + "index": 47 + }, + { + "bbox": [ + 310, + 684, + 563, + 719 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 684, + 563, + 719 + ], + "spans": [ + { + "bbox": [ + 310, + 684, + 563, + 719 + ], + "type": "text", + "content": "[116] H. He, J. Zhang, H. Chen, X. Chen, Z. Li, X. Chen, Y. Wang, C. Wang, and L. Xie, \"A diffusion-based framework for multi-class anomaly detection,\" in Proc. AAAI Conf. Artif. Intell., vol. 38, no. 8, 2024, pp. 8472-8480." + } + ] + } + ], + "index": 48 + }, + { + "bbox": [ + 310, + 719, + 563, + 747 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 719, + 563, + 747 + ], + "spans": [ + { + "bbox": [ + 310, + 719, + 563, + 747 + ], + "type": "text", + "content": "[117] A. Sherstinsky, “Fundamentals of recurrent neural network (rnn) and long short-term memory (lstm) network,” Physica D: Nonlinear Phenomena, vol. 404, p. 132306, 2020." + } + ] + } + ], + "index": 49 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 47, + 23, + 261, + 33 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 23, + 261, + 33 + ], + "spans": [ + { + "bbox": [ + 47, + 23, + 261, + 33 + ], + "type": "text", + "content": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. X, XXX 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 554, + 24, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 554, + 24, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 554, + 24, + 563, + 32 + ], + "type": "text", + "content": "22" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 56, + 301, + 747 + ], + "type": "list", + "angle": 0, + "index": 26, + "blocks": [ + { + "bbox": [ + 47, + 56, + 301, + 83 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 56, + 301, + 83 + ], + "spans": [ + { + "bbox": [ + 47, + 56, + 301, + 83 + ], + "type": "text", + "content": "[118] G. Van Houdt, C. Mosquera, and G. Nápoles, “A review on the long short-term memory model,” Artif. Intell. Rev., vol. 53, no. 8, pp. 5929–5955, 2020." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 84, + 301, + 111 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 84, + 301, + 111 + ], + "spans": [ + { + "bbox": [ + 47, + 84, + 301, + 111 + ], + "type": "text", + "content": "[119] B. Lindemann, B. Maschler, N. Sahlab, and M. 
Weyrich, “A survey on anomaly detection for technical systems using lstm networks,” Comput. Ind., vol. 131, p. 103498, 2021." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 112, + 301, + 138 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 112, + 301, + 138 + ], + "spans": [ + { + "bbox": [ + 47, + 112, + 301, + 138 + ], + "type": "text", + "content": "[120] R. Dey and F. M. Salem, “Gate-variants of gated recurrent unit (gru) neural networks,” in Proc. 2017 IEEE 60th Int. Midwest Symp. Circuits Syst. (MWSCAS). IEEE, 2017, pp. 1597–1600." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 139, + 301, + 165 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 139, + 301, + 165 + ], + "spans": [ + { + "bbox": [ + 47, + 139, + 301, + 165 + ], + "type": "text", + "content": "[121] Y. Wang, X. Du, Z. Lu, Q. Duan, and J. Wu, \"Improved lstm-based time-series anomaly detection in rail transit operation environments,\" IEEE Trans. Ind. Inform., vol. 18, no. 12, pp. 9027-9036, 2022." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 166, + 301, + 193 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 166, + 301, + 193 + ], + "spans": [ + { + "bbox": [ + 47, + 166, + 301, + 193 + ], + "type": "text", + "content": "[122] H. Chen, H. Liu, X. Chu, Q. Liu, and D. Xue, \"Anomaly detection and critical scada parameters identification for wind turbines based on lstm-ae neural network,\" Renew. Energy, vol. 172, pp. 829-840, 2021." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 194, + 301, + 220 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 194, + 301, + 220 + ], + "spans": [ + { + "bbox": [ + 47, + 194, + 301, + 220 + ], + "type": "text", + "content": "[123] P. Liu, X. Sun, Y. Han, Z. He, W. Zhang, and C. Wu, \"Arrhythmia classification of lstm autoencoder based on time series anomaly detection,\" Biomed. Signal Process. Control, vol. 71, p. 103228, 2022." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 220, + 301, + 247 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 220, + 301, + 247 + ], + "spans": [ + { + "bbox": [ + 47, + 220, + 301, + 247 + ], + "type": "text", + "content": "[124] Y. Yao, J. Ma, S. Feng, and Y. Ye, \"Svd-ae: An asymmetric autoencoder with svd regularization for multivariate time series anomaly detection,\" Neural Networks, vol. 170, pp. 535-547, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 248, + 301, + 283 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 248, + 301, + 283 + ], + "spans": [ + { + "bbox": [ + 47, + 248, + 301, + 283 + ], + "type": "text", + "content": "[125] S. Longari, D. H. N. Valcarcel, M. Zago, M. Carminati, and S. Zanero, \"Cannolo: An anomaly detection system based on lstm autoencoders for controller area network,\" IEEE Trans. Netw. Serv. Manag., vol. 18, no. 2, pp. 1913-1924, 2020." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 284, + 301, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 284, + 301, + 319 + ], + "spans": [ + { + "bbox": [ + 47, + 284, + 301, + 319 + ], + "type": "text", + "content": "[126] J. Pei, J. Wang, and D. Shi, \"Data-driven measurement tampering detection considering spatial-temporal correlations,\" in Proc. 2019 IEEE 3rd Conf. Energy Internet Energy Syst. Integr. (EI2), 2019, pp. 2641-2646." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 320, + 301, + 347 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 320, + 301, + 347 + ], + "spans": [ + { + "bbox": [ + 47, + 320, + 301, + 347 + ], + "type": "text", + "content": "[127] T. Lei, C. Gong, G. Chen, M. Ou, K. Yang, and J. Li, “A novel unsupervised framework for time series data anomaly detection via spectrum decomposition,” Knowledge-Based Systems, vol. 280, p. 111002, 2023." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 347, + 301, + 374 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 347, + 301, + 374 + ], + "spans": [ + { + "bbox": [ + 47, + 347, + 301, + 374 + ], + "type": "text", + "content": "[128] D. Hu, S. Wu, J. Wang, and D. Shi, \"Training a dynamic neural network to detect false data injection attacks under multiple unforeseen operating conditions,\" IEEE Trans. Smart Grid, 2023." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 375, + 301, + 402 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 375, + 301, + 402 + ], + "spans": [ + { + "bbox": [ + 47, + 375, + 301, + 402 + ], + "type": "text", + "content": "[129] C. Tang, L. Xu, B. Yang, Y. Tang, and D. Zhao, “Gru-based interpretable multivariate time series anomaly detection in industrial control system,” Comput. Secur., vol. 127, p. 103094, 2023." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 402, + 301, + 437 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 402, + 301, + 437 + ], + "spans": [ + { + "bbox": [ + 47, + 402, + 301, + 437 + ], + "type": "text", + "content": "[130] J. Yu, X. Gao, B. Li, F. Zhai, J. Lu, B. Xue, S. Fu, and C. Xiao, \"A filter-augmented auto-encoder with learnable normalization for robust multivariate time series anomaly detection,\" Neural Networks, vol. 170, pp. 478-493, 2024." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 47, + 438, + 301, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 438, + 301, + 456 + ], + "spans": [ + { + "bbox": [ + 47, + 438, + 301, + 456 + ], + "type": "text", + "content": "[131] A. Vaswani, \"Attention is all you need,\" Adv. Neural Inf. Process. Syst., 2017." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 47, + 456, + 301, + 483 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 456, + 301, + 483 + ], + "spans": [ + { + "bbox": [ + 47, + 456, + 301, + 483 + ], + "type": "text", + "content": "[132] H. Kang and P. Kang, \"Transformer-based multivariate time series anomaly detection using inter-variable attention mechanism,\" Knowledge-Based Systems, p. 111507, 2024." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 47, + 484, + 301, + 511 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 484, + 301, + 511 + ], + "spans": [ + { + "bbox": [ + 47, + 484, + 301, + 511 + ], + "type": "text", + "content": "[133] J. Kim, H. Kang, and P. Kang, “Time-series anomaly detection with stacked transformer representations and 1d convolutional network,” Eng. Appl. Artif. Intell., vol. 120, p. 105964, 2023." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 47, + 511, + 301, + 538 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 511, + 301, + 538 + ], + "spans": [ + { + "bbox": [ + 47, + 511, + 301, + 538 + ], + "type": "text", + "content": "[134] S. Tuli, G. Casale, and N. R. 
Jennings, “Tranad: Deep transformer networks for anomaly detection in multivariate time series data,” arXiv preprint arXiv:2201.07284, 2022." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 47, + 538, + 301, + 565 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 538, + 301, + 565 + ], + "spans": [ + { + "bbox": [ + 47, + 538, + 301, + 565 + ], + "type": "text", + "content": "[135] C. Wang and G. Liu, “From anomaly detection to classification with graph attention and transformer for multivariate time series,” Adv. Eng. Inform., vol. 60, p. 102357, 2024." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 47, + 566, + 301, + 593 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 566, + 301, + 593 + ], + "spans": [ + { + "bbox": [ + 47, + 566, + 301, + 593 + ], + "type": "text", + "content": "[136] J. Fan, Z. Wang, H. Wu, D. Sun, J. Wu, and X. Lu, \"An adversarial time-frequency reconstruction network for unsupervised anomaly detection,\" Neural Networks, vol. 168, pp. 44-56, 2023." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 47, + 593, + 301, + 628 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 593, + 301, + 628 + ], + "spans": [ + { + "bbox": [ + 47, + 593, + 301, + 628 + ], + "type": "text", + "content": "[137] Y. Shi, B. Wang, Y. Yu, X. Tang, C. Huang, and J. Dong, \"Robust anomaly detection for multivariate time series through temporal GCNs and attention-based vae,\" Knowledge-Based Systems, vol. 275, p. 110725, 2023." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 47, + 629, + 301, + 657 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 629, + 301, + 657 + ], + "spans": [ + { + "bbox": [ + 47, + 629, + 301, + 657 + ], + "type": "text", + "content": "[138] C. Ding, S. Sun, and J. Zhao, \"Mst-gat: A multimodal spatial-temporal graph attention network for time series anomaly detection,\" Inf. Fusion, vol. 89, pp. 527-536, 2023." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 47, + 657, + 301, + 684 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 657, + 301, + 684 + ], + "spans": [ + { + "bbox": [ + 47, + 657, + 301, + 684 + ], + "type": "text", + "content": "[139] W. Zhu, W. Li, E. R. Dorsey, and J. Luo, \"Unsupervised anomaly detection by densely contrastive learning for time series data,\" Neural Networks, vol. 168, pp. 450-458, 2023." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 47, + 684, + 301, + 711 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 684, + 301, + 711 + ], + "spans": [ + { + "bbox": [ + 47, + 684, + 301, + 711 + ], + "type": "text", + "content": "[140] H. Sun, M. Chen, J. Weng, Z. Liu, and G. Geng, \"Anomaly detection for in-vehicle network using cnn-lstm with attention mechanism,\" IEEE Trans. Veh. Technol., vol. 70, no. 10, pp. 10880-10893, 2021." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 47, + 712, + 301, + 747 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 712, + 301, + 747 + ], + "spans": [ + { + "bbox": [ + 47, + 712, + 301, + 747 + ], + "type": "text", + "content": "[141] T. Le, H. C. Vu, A. Ponchet-Durupt, N. Boudaoud, Z. Cherfi-Boulanger, and T. Nguyen-Trang, \"Unsupervised detecting anomalies in multivariate time series by robust convolutional LSTM encoder-decoder (rcled),\" Neurocomputing, vol. 592, p. 127791, 2024." 
+ } + ] + } + ], + "index": 25 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 310, + 56, + 563, + 747 + ], + "type": "list", + "angle": 0, + "index": 49, + "blocks": [ + { + "bbox": [ + 310, + 56, + 563, + 92 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 56, + 563, + 92 + ], + "spans": [ + { + "bbox": [ + 310, + 56, + 563, + 92 + ], + "type": "text", + "content": "[142] M. Jin, H. Y. Koh, Q. Wen, D. Zambon, C. Alippi, G. I. Webb, I. King, and S. Pan, “A survey on graph neural networks for time series: Forecasting, classification, imputation, and anomaly detection,” IEEE Trans. Pattern Anal. Mach. Intell., 2024." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 310, + 93, + 563, + 120 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 93, + 563, + 120 + ], + "spans": [ + { + "bbox": [ + 310, + 93, + 563, + 120 + ], + "type": "text", + "content": "[143] Y. Wu, H.-N. Dai, and H. Tang, \"Graph neural networks for anomaly detection in industrial internet of things,\" IEEE Internet Things J., vol. 9, no. 12, pp. 9214-9231, 2022." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 310, + 121, + 563, + 148 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 121, + 563, + 148 + ], + "spans": [ + { + "bbox": [ + 310, + 121, + 563, + 148 + ], + "type": "text", + "content": "[144] H. Kim, B. S. Lee, W.-Y. Shin, and S. Lim, “Graph anomaly detection with graph neural networks: Current status and challenges,” IEEE Access, vol. 10, pp. 111820-111829, 2022." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 310, + 148, + 563, + 175 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 148, + 563, + 175 + ], + "spans": [ + { + "bbox": [ + 310, + 148, + 563, + 175 + ], + "type": "text", + "content": "[145] A. Deng and B. Hooi, “Graph neural network-based anomaly detection in multivariate time series,” in Proc. AAAI Conf. Artif. Intell. (AAAI), vol. 35, no. 5, 2021, pp. 4027–4035." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 310, + 175, + 563, + 210 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 175, + 563, + 210 + ], + "spans": [ + { + "bbox": [ + 310, + 175, + 563, + 210 + ], + "type": "text", + "content": "[146] Z. Chen, D. Chen, X. Zhang, Z. Yuan, and X. Cheng, “Learning graph structures with transformer for multivariate time-series anomaly detection in IoT,” IEEE Internet Things J., vol. 9, no. 12, pp. 9179–9189, 2021." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 310, + 211, + 563, + 247 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 211, + 563, + 247 + ], + "spans": [ + { + "bbox": [ + 310, + 211, + 563, + 247 + ], + "type": "text", + "content": "[147] Y. Zheng, H. Y. Koh, M. Jin, L. Chi, K. T. Phan, S. Pan, Y.-P. P. Chen, and W. Xiang, \"Correlation-aware spatial-temporal graph learning for multivariate time-series anomaly detection,\" IEEE Trans. Neural Netw. Learn. Syst., 2023." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 310, + 248, + 563, + 282 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 248, + 563, + 282 + ], + "spans": [ + { + "bbox": [ + 310, + 248, + 563, + 282 + ], + "type": "text", + "content": "[148] Y. Liu, Z. Li, S. Pan, C. Gong, C. Zhou, and G. Karypis, \"Anomaly detection on attributed networks via contrastive self-supervised learning,\" IEEE Trans. Neural Netw. Learn. Syst., vol. 33, no. 6, pp. 2378-2392, 2022." 
+ } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 310, + 284, + 563, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 284, + 563, + 319 + ], + "spans": [ + { + "bbox": [ + 310, + 284, + 563, + 319 + ], + "type": "text", + "content": "[149] H. Zhao, Y. Wang, J. Duan, C. Huang, D. Cao, Y. Tong, B. Xu, J. Bai, J. Tong, and Q. Zhang, \"Multivariate time-series anomaly detection via graph attention network,\" in Proc. 2020 IEEE Int. Conf. Data Min. (ICDM)). IEEE, 2020, pp. 841-850." + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 310, + 320, + 563, + 356 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 320, + 563, + 356 + ], + "spans": [ + { + "bbox": [ + 310, + 320, + 563, + 356 + ], + "type": "text", + "content": "[150] W. Chen, L. Tian, B. Chen, L. Dai, Z. Duan, and M. Zhou, “Deep variational graph convolutional recurrent network for multivariate time series anomaly detection,” in Int. Conf. Mach. Learn. (ICML). PMLR, 2022, pp. 3621–3633." + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 310, + 357, + 563, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 357, + 563, + 392 + ], + "spans": [ + { + "bbox": [ + 310, + 357, + 563, + 392 + ], + "type": "text", + "content": "[151] S. Han and S. S. Woo, \"Learning sparse latent graph representations for anomaly detection in multivariate time series,\" in Proc. 28th ACM SIGKDD Conf. Knowl. Discov. Data Min. (KDD), 2022, pp. 2977-2986." + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 310, + 392, + 563, + 420 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 392, + 563, + 420 + ], + "spans": [ + { + "bbox": [ + 310, + 392, + 563, + 420 + ], + "type": "text", + "content": "[152] Y. Tang, L. Zhao, S. Zhang, C. Gong, G. Li, and J. Yang, \"Integrating prediction and reconstruction for anomaly detection,\" Pattern Recognit. Lett., vol. 129, pp. 123-130, 2020." + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 310, + 420, + 563, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 420, + 563, + 456 + ], + "spans": [ + { + "bbox": [ + 310, + 420, + 563, + 456 + ], + "type": "text", + "content": "[153] M. Zheng, J. Man, D. Wang, Y. Chen, Q. Li, and Y. Liu, \"Semisupervised multivariate time series anomaly detection for wind turbines using generator scada data,\" Reliab. Eng. Syst. Saf., vol. 235, p. 109235, 2023." + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 310, + 456, + 563, + 483 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 456, + 563, + 483 + ], + "spans": [ + { + "bbox": [ + 310, + 456, + 563, + 483 + ], + "type": "text", + "content": "[154] Y. Wei, J. Jang-Jaccard, W. Xu, F. Sabrina, S. Camtepe, and M. Boulic, \"Lstm-autoencoder-based anomaly detection for indoor air quality time-series data,\" IEEE Sens. J., vol. 23, no. 4, pp. 3787-3800, 2023." + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 310, + 483, + 563, + 511 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 483, + 563, + 511 + ], + "spans": [ + { + "bbox": [ + 310, + 483, + 563, + 511 + ], + "type": "text", + "content": "[155] G. Pu, L. Wang, J. Shen, and F. Dong, “A hybrid unsupervised clustering-based anomaly detection method,” Tsinghua Sci. Technol., vol. 26, no. 2, pp. 146–153, 2020." 
+ } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 310, + 511, + 563, + 537 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 511, + 563, + 537 + ], + "spans": [ + { + "bbox": [ + 310, + 511, + 563, + 537 + ], + "type": "text", + "content": "[156] B. Liu, Y. Xiao, L. Cao, Z. Hao, and F. Deng, \"Svdd-based outlier detection on uncertain data,\" Knowl. Inf. Syst., vol. 34, pp. 597-618, 2013." + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 310, + 538, + 563, + 565 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 538, + 563, + 565 + ], + "spans": [ + { + "bbox": [ + 310, + 538, + 563, + 565 + ], + "type": "text", + "content": "[157] A. P. Muniyandi, R. Rajeswari, and R. Rajaram, \"Network anomaly detection by cascading k-means clustering and c4. 5 decision tree algorithm,\" *Proceedia Eng.*, vol. 30, pp. 174-182, 2012." + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 310, + 565, + 563, + 601 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 565, + 563, + 601 + ], + "spans": [ + { + "bbox": [ + 310, + 565, + 563, + 601 + ], + "type": "text", + "content": "[158] A. M. Ikotun, A. E. Ezugwu, L. Abualigah, B. Abuhaija, and J. Heming, \"K-means clustering algorithms: A comprehensive review, variants analysis, and advances in the era of big data,\" Inf. Sci., vol. 622, pp. 178-210, 2023." + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 310, + 601, + 563, + 628 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 601, + 563, + 628 + ], + "spans": [ + { + "bbox": [ + 310, + 601, + 563, + 628 + ], + "type": "text", + "content": "[159] H. V. Singh, A. Girdhar, and S. Dahiya, “A literature survey based on dbscan algorithms,” in Proc. 2022 6th Int. Conf. Intell. Comput. Control Syst. (ICICCS). IEEE, 2022, pp. 751-758." + } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 310, + 629, + 563, + 656 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 629, + 563, + 656 + ], + "spans": [ + { + "bbox": [ + 310, + 629, + 563, + 656 + ], + "type": "text", + "content": "[160] F. Murtagh and P. Contreras, “Algorithms for hierarchical clustering: an overview,” Wiley Interdiscip. Rev. Data Min. Knowl. Discov., vol. 2, no. 1, pp. 86–97, 2012." + } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 310, + 656, + 563, + 684 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 656, + 563, + 684 + ], + "spans": [ + { + "bbox": [ + 310, + 656, + 563, + 684 + ], + "type": "text", + "content": "[161] J. Li, H. Izakian, W. Pedrycz, and I. Jamal, \"Clustering-based anomaly detection in multivariate time series data,\" Appl. Soft Comput., vol. 100, p. 106919, 2021." + } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 310, + 684, + 563, + 719 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 684, + 563, + 719 + ], + "spans": [ + { + "bbox": [ + 310, + 684, + 563, + 719 + ], + "type": "text", + "content": "[162] A. Markovitz, G. Sharir, I. Friedman, L. Zelnik-Manor, and S. Avidan, \"Graph embedded pose clustering for anomaly detection,\" in Proc. IEEE/CVF Conf. Comput. Vis. Pattern Recognit., 2020, pp. 10539-10547." + } + ] + } + ], + "index": 47 + }, + { + "bbox": [ + 310, + 719, + 563, + 747 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 719, + 563, + 747 + ], + "spans": [ + { + "bbox": [ + 310, + 719, + 563, + 747 + ], + "type": "text", + "content": "[163] S. Qiu, J. Ye, J. 
Zhao, L. He, L. Liu, E. Bicong, and X. Huang, “Video anomaly detection guided by clustering learning,” Pattern Recognit., vol. 153, p. 110550, 2024." + } + ] + } + ], + "index": 48 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 47, + 23, + 261, + 33 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 23, + 261, + 33 + ], + "spans": [ + { + "bbox": [ + 47, + 23, + 261, + 33 + ], + "type": "text", + "content": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. X, XXX 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 554, + 24, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 554, + 24, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 554, + 24, + 563, + 32 + ], + "type": "text", + "content": "23" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 22 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 57, + 301, + 613 + ], + "type": "list", + "angle": 0, + "index": 22, + "blocks": [ + { + "bbox": [ + 47, + 57, + 301, + 83 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 57, + 301, + 83 + ], + "spans": [ + { + "bbox": [ + 47, + 57, + 301, + 83 + ], + "type": "text", + "content": "[164] I. Kobyzev, S. J. Prince, and M. A. Brubaker, “Normalizing flows: An introduction and review of current methods,” IEEE Trans. Pattern Anal. Mach. Intell., vol. 43, no. 11, pp. 3964–3979, 2020." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 84, + 301, + 110 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 84, + 301, + 110 + ], + "spans": [ + { + "bbox": [ + 47, + 84, + 301, + 110 + ], + "type": "text", + "content": "[165] J. Yu, Y. Zheng, X. Wang, W. Li, Y. Wu, R. Zhao, and L. Wu, \"Fastflow: Unsupervised anomaly detection and localization via 2d normalizing flows,\" arXiv preprint arXiv:2111.07677, 2021." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 111, + 301, + 137 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 111, + 301, + 137 + ], + "spans": [ + { + "bbox": [ + 47, + 111, + 301, + 137 + ], + "type": "text", + "content": "[166] M. Cho, T. Kim, W. J. Kim, S. Cho, and S. Lee, \"Unsupervised video anomaly detection via normalizing flows with implicit latent features,\" Pattern Recognit., vol. 129, p. 108703, 2022." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 138, + 301, + 156 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 138, + 301, + 156 + ], + "spans": [ + { + "bbox": [ + 47, + 138, + 301, + 156 + ], + "type": "text", + "content": "[167] Q. Zhou, S. He, H. Liu, J. Chen, and W. Meng, \"Label-free multivariate time series anomaly detection,\" IEEE Trans. Knowl. Data Eng., 2024." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 156, + 301, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 156, + 301, + 182 + ], + "spans": [ + { + "bbox": [ + 47, + 156, + 301, + 182 + ], + "type": "text", + "content": "[168] E. Dai and J. Chen, \"Graph-augmented normalizing flows for anomaly detection of multiple time series,\" arXiv preprint arXiv:2202.07857, 2022." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 182, + 301, + 208 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 182, + 301, + 208 + ], + "spans": [ + { + "bbox": [ + 47, + 182, + 301, + 208 + ], + "type": "text", + "content": "[169] Y. Zhou, X. Liang, W. Zhang, L. Zhang, and X. 
Song, \"Vae-based deep svdd for anomaly detection,\" Neurocomputing, vol. 453, pp. 131-140, 2021." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 209, + 301, + 236 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 209, + 301, + 236 + ], + "spans": [ + { + "bbox": [ + 47, + 209, + 301, + 236 + ], + "type": "text", + "content": "[170] Z. Zhang and X. Deng, \"Anomaly detection using improved deep svdd model with data structure preservation,\" Pattern Recognit. Lett., vol. 148, pp. 1-6, 2021." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 237, + 301, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 237, + 301, + 262 + ], + "spans": [ + { + "bbox": [ + 47, + 237, + 301, + 262 + ], + "type": "text", + "content": "[171] J. Luo, J. Lin, Z. Yang, and H. Liu, \"Smd anomaly detection: A self-supervised texture-structure anomaly detection framework,\" IEEE Trans. Instrum. Meas., vol. 71, pp. 1-11, 2022." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 263, + 301, + 290 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 263, + 301, + 290 + ], + "spans": [ + { + "bbox": [ + 47, + 263, + 301, + 290 + ], + "type": "text", + "content": "[172] C.-L. Li, K. Sohn, J. Yoon, and T. Pfister, \"Cutpaste: Self-supervised learning for anomaly detection and localization,\" in Proc. IEEE/CVF Conf. Comput. Vis. Pattern Recognit., 2021, pp. 9664-9674." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 291, + 301, + 317 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 291, + 301, + 317 + ], + "spans": [ + { + "bbox": [ + 47, + 291, + 301, + 317 + ], + "type": "text", + "content": "[173] R. Liu, W. Liu, Z. Zheng, L. Wang, L. Mao, Q. Qiu, and G. Ling, \"Anomaly-gan: A data augmentation method for train surface anomaly detection,\" Expert Syst. Appl., vol. 228, p. 120284, 2023." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 317, + 301, + 343 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 317, + 301, + 343 + ], + "spans": [ + { + "bbox": [ + 47, + 317, + 301, + 343 + ], + "type": "text", + "content": "[174] Q. Wen, L. Sun, F. Yang, X. Song, J. Gao, X. Wang, and H. Xu, \"Time series data augmentation for deep learning: A survey,\" arXiv preprint arXiv:2002.12478, 2020." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 344, + 301, + 371 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 344, + 301, + 371 + ], + "spans": [ + { + "bbox": [ + 47, + 344, + 301, + 371 + ], + "type": "text", + "content": "[175] H. Hojjati, T. K. K. Ho, and N. Armanfard, \"Self-supervised anomaly detection in computer vision and beyond: A survey and outlook,\" Neural Networks, vol. 172, p. 106106, 2024." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 372, + 301, + 406 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 372, + 301, + 406 + ], + "spans": [ + { + "bbox": [ + 47, + 372, + 301, + 406 + ], + "type": "text", + "content": "[176] X. Zhang, M. Xu, and X. Zhou, “Realnet: A feature selection network with realistic synthetic anomaly for anomaly detection,” in Proc. IEEE/CVF Conf. Comput. Vis. Pattern Recognit., 2024, pp. 16699–16708." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 47, + 407, + 301, + 434 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 407, + 301, + 434 + ], + "spans": [ + { + "bbox": [ + 47, + 407, + 301, + 434 + ], + "type": "text", + "content": "[177] F. Van Wyk, Y. Wang, A. Khojandi, and N. Masoud, “Real-time sensor anomaly detection and identification in automated vehicles,” IEEE Trans. Intell. Transp. Syst., vol. 21, no. 3, pp. 1264–1276, 2019." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 47, + 434, + 301, + 468 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 434, + 301, + 468 + ], + "spans": [ + { + "bbox": [ + 47, + 434, + 301, + 468 + ], + "type": "text", + "content": "[178] M. Abouof, R. Mizouni, S. Singh, H. Otrok, and E. Damiani, \"Self-supervised online and lightweight anomaly and event detection for IoT devices,\" IEEE Internet Things J, vol. 9, no. 24, pp. 25 285-25 299, 2022." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 47, + 469, + 301, + 505 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 469, + 301, + 505 + ], + "spans": [ + { + "bbox": [ + 47, + 469, + 301, + 505 + ], + "type": "text", + "content": "[179] X. Zhou, J. Wu, W. Liang, I. Kevin, K. Wang, Z. Yan, L. T. Yang, and Q. Jin, \"Reconstructed graph neural network with knowledge distillation for lightweight anomaly detection,\" IEEE Trans. Neural Netw. Learn. Syst., 2024." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 47, + 506, + 301, + 532 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 506, + 301, + 532 + ], + "spans": [ + { + "bbox": [ + 47, + 506, + 301, + 532 + ], + "type": "text", + "content": "[180] Y. Zhao, G. H. Chen, and Z. Jia, “Tod: GPU-accelerated outlier detection via tensor operations,” arXiv preprint arXiv:2110.14007, 2021." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 47, + 533, + 301, + 559 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 533, + 301, + 559 + ], + "spans": [ + { + "bbox": [ + 47, + 533, + 301, + 559 + ], + "type": "text", + "content": "[181] A. Al-Mazrawe and B. Al-Musawi, “Anomaly detection in cloud network: A review,” in BIO Web of Conferences, vol. 97. EDP Sciences, 2024, p. 00019." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 47, + 559, + 301, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 559, + 301, + 578 + ], + "spans": [ + { + "bbox": [ + 47, + 559, + 301, + 578 + ], + "type": "text", + "content": "[182] Z. Niu, G. Zhong, and H. Yu, “A review on the attention mechanism of deep learning,” Neurocomputing, vol. 452, pp. 48–62, 2021." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 47, + 578, + 301, + 613 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 578, + 301, + 613 + ], + "spans": [ + { + "bbox": [ + 47, + 578, + 301, + 613 + ], + "type": "text", + "content": "[183] H. Liu, X. Huang, M. Jia, T. Jia, J. Han, Y. Li, and Z. Wu, \"Uac-ad: Unsupervised adversarial contrastive learning for anomaly detection on multi-modal data in microservice systems,\" IEEE Trans. Serv. Comput., 2024." 
+ } + ] + } + ], + "index": 21 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 47, + 23, + 261, + 33 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 23, + 261, + 33 + ], + "spans": [ + { + "bbox": [ + 47, + 23, + 261, + 33 + ], + "type": "text", + "content": "IEEE INTERNET OF THINGS JOURNAL, VOL. X, NO. X, XXX 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 25, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 25, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 555, + 25, + 563, + 32 + ], + "type": "text", + "content": "24" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 23 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2506_12xxx/2506.12103/2704209e-62a6-4e6a-a67a-5d895054a41f_content_list.json b/data/2025/2506_12xxx/2506.12103/2704209e-62a6-4e6a-a67a-5d895054a41f_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..4ef8ea546a3383803ed035a3c5f9058aef40dc12 --- /dev/null +++ b/data/2025/2506_12xxx/2506.12103/2704209e-62a6-4e6a-a67a-5d895054a41f_content_list.json @@ -0,0 +1,14710 @@ +[ + { + "type": "text", + "text": "The Amazon Nova Family of Models: Technical Report and Model Card", + "text_level": 1, + "bbox": [ + 274, + 122, + 727, + 174 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/3156a9077f1c972bfe8d4f5736cc7cb801a543c0a7e1872ae7041bb75bf072ce.jpg", + "image_caption": [ + "Figure 1: The Amazon Nova family of models" + ], + "image_footnote": [], + "bbox": [ + 112, + 277, + 467, + 583 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/1888da7922d07148b8348987b53dfc4837e982a06f12992d09585bdf6e01d4e8.jpg", + "image_caption": [ + "Amazon Artificial General Intelligence" + ], + "image_footnote": [], + "bbox": [ + 532, + 345, + 883, + 429 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/6eb562a9d279dd55a2a329e5996f1b4fa88c3aa97c915eefdfe3c4fad694eb2a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 532, + 498, + 883, + 583 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 459, + 662, + 539, + 678 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "We present Amazon Nova, a new generation of state-of-the-art foundation models that deliver frontier intelligence and industry-leading price performance. Amazon Nova Pro is a highly-capable multimodal model with the best combination of accuracy, speed, and cost for a wide range of tasks. Amazon Nova Lite is a low-cost multimodal model that is lightning fast for processing images, video, documents and text. Amazon Nova Micro is a text-only model that delivers our lowest-latency responses at very low cost. Amazon Nova Canvas is an image generation model that creates professional grade images with rich customization controls. Amazon Nova Reel is a video generation model offering high-quality outputs, customization, and motion control. Our models were built responsibly and with a commitment to customer trust, security, and reliability. 
We report benchmarking results for core capabilities, agentic performance, long context, functional adaptation, runtime performance, and human evaluation.", + "bbox": [ + 169, + 693, + 826, + 847 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2506.12103v1 [cs.AI] 17 Jun 2025", + "bbox": [ + 22, + 267, + 58, + 707 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Contents", + "text_level": 1, + "bbox": [ + 112, + 89, + 194, + 106 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "1 Introduction 3", + "text_level": 1, + "bbox": [ + 114, + 117, + 883, + 131 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1.1 Amazon Nova Pro, Lite, and Micro 3", + "1.2 Amazon Nova Canvas and Reel 3" + ], + "bbox": [ + 140, + 132, + 883, + 157 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 Amazon Nova Pro, Lite, and Micro Evaluations 5", + "text_level": 1, + "bbox": [ + 112, + 171, + 883, + 185 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1 Core capability public benchmarks 5", + "bbox": [ + 138, + 186, + 883, + 198 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "2.1.1 Core capability text benchmarks and results 5", + "2.1.2 Core capability multimodal benchmarks and results 7" + ], + "bbox": [ + 174, + 200, + 883, + 226 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.2 Agentic workflows 8", + "bbox": [ + 138, + 227, + 883, + 239 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "2.2.1 Agentic text benchmarks and results 9", + "2.2.2 Agentic multimodal benchmarks and results 9" + ], + "bbox": [ + 174, + 241, + 883, + 268 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.3 Long context 10", + "bbox": [ + 138, + 268, + 883, + 281 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.4 Functional expertise 11", + "bbox": [ + 138, + 282, + 883, + 296 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "2.4.1 Software engineering 12", + "2.4.2 Financial analysis 12", + "2.4.3 Retrieval augmented generation 12" + ], + "bbox": [ + 174, + 297, + 883, + 337 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.5 Runtime performance 13", + "bbox": [ + 138, + 338, + 883, + 351 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3 Amazon Nova Canvas Evaluation 15", + "text_level": 1, + "bbox": [ + 114, + 363, + 883, + 376 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "3.1 Automated metrics 15", + "3.2 Human evaluation 15" + ], + "bbox": [ + 138, + 378, + 883, + 404 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "4 Amazon Nova Reel Evaluation 16", + "text_level": 1, + "bbox": [ + 114, + 417, + 883, + 430 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "4.1 Human evaluation metrics 16", + "4.2 Dataset 16", + "4.3 Implementation details & results 17" + ], + "bbox": [ + 138, + 431, + 883, + 472 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "5 Responsible AI 17", + "text_level": 1, + "bbox": [ + 114, + 484, + 883, + 498 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "5.1 Defining our RAI objectives 17", + "5.2 Ensuring adherence to RAI objectives 18", + "5.3 RAI Evaluation 19", + "5.4 Red Teaming 19" + ], + "bbox": [ + 138, + 500, + 883, + 554 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": 
"text", + "list_items": [ + "5.4.1 Internal Red Teaming 19", + "5.4.2 External Red Teaming 20", + "5.4.3 Automated Red Teaming 21" + ], + "bbox": [ + 174, + 555, + 883, + 595 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "6 Training Infrastructure 21", + "text_level": 1, + "bbox": [ + 114, + 607, + 883, + 622 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "A Amazon Nova Canvas Capabilities 28", + "B Prompts and Scoring 30", + "C Qualitative examples of multimodal intelligence 39", + "D Correspondence and Contributors 43" + ], + "bbox": [ + 114, + 633, + 883, + 727 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "The Amazon Nova Family of Models", + "bbox": [ + 370, + 42, + 620, + 56 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 490, + 935, + 501, + 946 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 112, + 89, + 253, + 104 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "This document introduces Amazon Nova, a new generation of state-of-the-art foundation models that deliver frontier intelligence and industry-leading price performance.", + "bbox": [ + 109, + 122, + 883, + 151 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "1.1 Amazon Nova Pro, Lite, and Micro", + "text_level": 1, + "bbox": [ + 112, + 169, + 398, + 184 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Key capabilities of Amazon Nova Pro, Lite, and Micro include:", + "bbox": [ + 112, + 196, + 532, + 210 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Frontier intelligence: Amazon Nova models possess frontier intelligence, enabling them to understand and process complex language tasks with state-of-the-art accuracy. Amazon Nova Micro sets new standards in its intelligence tier in several text benchmarks such as Language Understanding (MMLU), Deep Reasoning (GPQA), Mathematics (MATH), and Multi-step Reasoning (Big-Bench Hard). Our multimodal models, Amazon Nova Pro and Lite, take text, images, documents, and video as input and generate text as output. These models set standards in several benchmarks such as Video Captioning (VATEX), Visual QA (TextVQA), Function Calling (BFCL), and multimodal agentic benchmarks (GroundUI-1K, VisualWebBench, Mind2Web) in their respective intelligence tiers. These models are the first to offer video understanding capabilities on Amazon Bedrock, enabling deeper insights from multimedia content.", + "- Speed: Amazon Nova has been designed for fast inference, with Amazon Micro, Lite, and Pro each being one of the fastest models in their respective intelligence tiers.", + "- Agentic Workflows: Amazon Nova Pro, Lite, and Micro can power AI agents capable of breaking down and executing multi-step tasks. These models are integrated with Bedrock Knowledge Bases and they excel at retrieval-augmented generation (RAG) to ensure the best accuracy by grounding their responses to the developer's data.", + "- Customizability: Developers can fine-tune these models with multimodal data (Pro and Lite) or text data (Pro, Lite, and Micro), providing the flexibility to achieve desired accuracy, latency, and cost. 
Developers can also run self-service Custom Fine-Tuning (CFT) and distillation of larger models to smaller ones via Bedrock APIs.", + "- Price-Performance: Each model was optimized to deliver exceptional price-performance value, offering state-of-the-art performance on key benchmarks at low cost." + ], + "bbox": [ + 156, + 224, + 883, + 532 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Amazon Nova Pro, Lite, and Micro are based on the Transformer architecture [74]. Each model went through a series of training processes that began with pretraining using a mixture of large amounts of multilingual and multimodal data. Our models were trained on data from a variety of sources, including licensed data, proprietary data, open source datasets, and publicly available data where appropriate. We curated data from over 200 languages, with particular emphasis on Arabic, Dutch, English, French, German, Hebrew, Hindi, Italian, Japanese, Korean, Portuguese, Russian, Simplified Chinese, Spanish, and Turkish. After pretraining, models iteratively went through a series of fine-tuning stages, including Supervised Fine-Tuning (SFT) on instruction-demonstration pairs (including multimodal ones) and reward model (RM) training from human preference data [59]. Finally, the models learned from human preferences via methods like Direct Preference Optimization (DPO) [62] and Proximal Policy Optimization (PPO) [68] to ensure that the final models are aligned with human preferences in both quality and responsibility.", + "bbox": [ + 109, + 547, + 883, + 686 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "1.2 Amazon Nova Canvas and Reel", + "text_level": 1, + "bbox": [ + 112, + 704, + 374, + 718 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Amazon Nova Canvas and Amazon Nova Reel are designed to create realistic multimodal content, including images and videos, for a wide range of applications such as advertising, marketing, and entertainment.", + "bbox": [ + 109, + 731, + 883, + 760 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Amazon Nova Canvas offers the following functionalities, with more details provided in Appendix A:", + "bbox": [ + 111, + 765, + 779, + 780 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Text-to-image generation: Amazon Nova Canvas can generate images with various resolutions (from 512 up to 2K horizontal resolution) and aspect ratios (any aspect ratio between 1:4 and 4:1 with a maximum of 4.2M pixels). Customers can provide reference images to guide the model to generate outputs in a specific style or color palette, or to generate variations of an image.", + "- Image editing: Amazon Nova Canvas allows precise image editing operations like inpainting and outpainting through natural language mask prompts. These mask prompts describe the specific area of the input image that needs to be repaired. The user can also easily change a background with the background removal feature leaving the subject of the image unchanged." 
+ ], + "bbox": [ + 156, + 792, + 880, + 910 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "The Amazon Nova Family of Models", + "bbox": [ + 370, + 42, + 620, + 56 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 490, + 935, + 501, + 946 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Amazon Nova Reel offers the following functionalities:", + "bbox": [ + 112, + 90, + 480, + 106 + ], + "page_idx": 3 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Generate videos from a text prompt: Amazon Nova Reel can generate high-quality videos of 6-second duration (720p resolution at 24 frames per second) from a text prompt.", + "- Generate videos from a reference image and a prompt: Amazon Nova Reel brings images to motion and generates videos that are guided by the input image and a text prompt.", + "- Camera motion control using a text prompt: With camera motion control in Amazon Nova Reel, the user can guide camera motion with text prompts like \"zoom\" and \"dolly forward\" to get the exact visual needed for each video. Amazon Nova Reel supports more than 20 camera motions. For more details, please refer to our prompting guide1." + ], + "bbox": [ + 156, + 117, + 880, + 237 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Amazon Nova Canvas and Reel are latent diffusion models [61] where a Variational AutoEncoder (VAE) [41] maps the image or video frames to latent variables on which the diffusion process happens. A text encoder tokenizes input text prompts into tokens which are then passed to the diffusion model as a conditioning signal. At inference time, a latent variable is initialized with random noise sampled from a Gaussian distribution, which is then denoised by the trained diffusion model iteratively into a clean latent variable. The clean latent variable is decoded back to images or video frames by the decoder of the VAE. Both models underwent a two-phased approach of pretraining and fine-tuning. Pretraining data were sourced from a variety of sources, including licensed data, proprietary data, open source datasets, and publicly available data where appropriate. 
Our highly scalable data filtering, deduplication, and enrichment pipelines were based on AWS EMR [2] and AWS Batch [1], as well as other AWS services.", + "bbox": [ + 111, + 250, + 883, + 375 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "The Amazon Nova Family of Models", + "bbox": [ + 370, + 42, + 620, + 56 + ], + "page_idx": 3 + }, + { + "type": "page_footnote", + "text": "tok/sec | MMLU | ARC-C | DROP | GPQA | MATH | GSM8k | IFEval | BBH
(metric) | accuracy | accuracy | F1-score | accuracy | accuracy | accuracy | instruction-level loose accuracy | accuracy
Nova Pro | 100 | 85.9 | 94.8±1.3 | 85.4±0.7 | 46.9±4.6 | 76.6±1.2 | 94.8±1.2 | 92.1±1.8 | 86.9
Nova Lite | 157 | 80.5 | 92.4±1.5 | 80.2±0.8 | 42.0±4.6 | 73.3±1.2 | 94.5±1.2 | 89.7±2.1 | 82.4
Nova Micro | 210 | 77.6 | 90.2±1.7 | 79.3±0.8 | 40.0±4.5 | 69.3±1.3 | 92.3±1.4 | 87.2±2.3 | 79.5
(shots) | 0-shot CoT | 0-shot | 6-shot CoT | 0-shot CoT | 0-shot CoT | 0-shot CoT | 0-shot | 3-shot CoT
Claude 3.5 Sonnet (Oct) | 57 | 89.3 | 96.3M±1.1 | 88.3±0.6 | 58.0M±4.6 | 78.3±1.1 | 96.5M±1.0 | 90.2*±2.0 | 93.2
Claude 3.5 Haiku | 64 | 80.3 | 90.9M±1.6 | 83.1±0.8 | 37.5M±4.5 | 69.4±1.3 | 93.8M±1.3 | 85.9*±2.4 | 86.6
(shots) | 0-shot CoT | 25-shot | 3-shot | 0-shot CoT | 0-shot CoT | 0-shot CoT | 0-shot | 3-shot CoT
Gemini 1.5 Pro (002) | 58 | 85.9 | 95.4M±1.2 | 74.9±0.9 | 55.1M±4.6 | 86.5±0.9 | 90.8±1.6 | 91.7M±1.9 | 89.2
Gemini 1.5 Flash (002) | 190 | 78.9 | 94.3M±1.3 | 78.4±0.8 | 45.1M±4.6 | 77.9±1.2 | 86.2±1.9 | 91.6M±1.9 | 85.5
Gemini 1.5 Flash 8B (001) | 283 | 68.1 | 88.7M±1.8 | 68.1M±0.9 | 33.5M±4.4 | 58.7±1.4 | 84.5M±2.0 | 86.1M±2.3 | 69.5
(shots) | 5-shot | 25-shot | 3-shot | 0-shot | 4-shot | 11-shot | 0-shot | 3-shot
GPT-4o | 163 | 88.7 | 96.2M±1.1 | 83.4±0.7 | 48.4M±4.6 | 76.6±1.2 | 92.6M±1.4 | 89.8M±2.1 | 83.0M
GPT-4o Mini | 113 | 82.0 | 92.3M±1.5 | 79.7±0.8 | 41.7M±4.6 | 70.2±1.3 | 86.4M±1.8 | 87.4M±2.3 | 81.0M
(shots) | 0-shot | 25-shot | 3-shot | 0-shot CoT | 0-shot CoT | 0-shot CoT | 0-shot | 3-shot
Llama 3.2 90B | 40 | 86.0 | 94.8±1.3 | - | 46.7±4.6 | 68.0±1.3 | 95.1±1.2 | 90.9M±2.0 | -
Llama 3.2 11B | 124 | 73.0 | 83.4±2.1 | - | 32.8±4.3 | 51.9±1.4 | 84.5±2.0 | 85.0M±2.4 | -
Llama 3.1 8B | 157 | 73.0 | 83.4±2.1 | - | 30.4±4.3 | 51.9±1.4 | 84.5±2.0 | 85.0M±2.4 | -
(shots) | 0-shot CoT | 25-shot | - | 0-shot CoT | 0-shot CoT | 8-shot CoT | - | -", + "bbox": [ + 114, + 125, + 883, + 760 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "The Amazon Nova Family of Models", + "bbox": [ + 370, + 42, + 620, + 56 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 490, + 935, + 501, + 946 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 1 summarizes the quantitative results of Nova models and select public models on the aforementioned benchmarks for core capabilities. When available, we reference the highest publicly-reported numbers for each benchmark from the official technical reports and websites for Claude, Gemini, OpenAI and Llama family of models. Amazon Nova Pro, Lite, and Micro demonstrate strong performance across all benchmarks, showcasing their advanced core intelligence, particularly Amazon Nova Micro and Lite on math, reasoning, and instruction following benchmarks.", + "bbox": [ + 109, + 90, + 887, + 161 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We also evaluate the translation capabilities of Nova models. Flores200 [73, 34, 35], or simply Flores, is a machine translation benchmark consisting of translations from 842 distinct web articles, which tests the translation capabilities between English and non-English languages. Sentences are 21 words long on average. We use a 0-shot setup and report the macro average of two metrics, spBleu and COMET22 score [63] across a set of languages (Arabic, German, Spanish, French, Hindi, Italian, Japanese, Korean, Portuguese, Hebrew, Turkish, Simplified Chinese, Russian, Dutch) for translation from and into English. The prompts used for evaluation are summarized in Appendix B.1. 
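The macro-averaged spBleu and COMET22 scoring just described can be reproduced with common open-source scorers. The sketch below assumes sacrebleu's `flores200` SentencePiece tokenizer for spBleu and the Unbabel `wmt22-comet-da` checkpoint for COMET22; the report does not state its exact scoring stack, so treat these tool choices as assumptions.

```python
# Sketch: macro average of spBleu and COMET22 across language directions.
# Tooling (sacrebleu's flores200 tokenizer, Unbabel/wmt22-comet-da) is an
# assumption, not confirmed by the report.
import sacrebleu                                         # pip install "sacrebleu>=2.2"
from comet import download_model, load_from_checkpoint   # pip install unbabel-comet

comet = load_from_checkpoint(download_model("Unbabel/wmt22-comet-da"))

def score_pair(srcs, hyps, refs):
    # spBLEU: BLEU computed over the Flores-200 SentencePiece tokenization.
    spbleu = sacrebleu.corpus_bleu(hyps, [refs], tokenize="flores200").score
    data = [{"src": s, "mt": h, "ref": r} for s, h, r in zip(srcs, hyps, refs)]
    comet22 = comet.predict(data, batch_size=8, gpus=0).system_score
    return spbleu, comet22

def macro_average(pairs):
    # pairs: one (srcs, hyps, refs) triple per language direction.
    scores = [score_pair(*p) for p in pairs]
    return (sum(s for s, _ in scores) / len(scores),
            sum(c for _, c in scores) / len(scores))
```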
Table 2 summarizes our quantitative results on Flores, demonstrating strong multilingual performance on translation for Amazon Nova Pro, Lite, and Micro.", + "bbox": [ + 109, + 166, + 883, + 277 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/df5d97728a52aeb8f9891aed812b4ce2cc7084518fcd2dfcb1ac34d32988bbd2.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
FLORES (0-shot)
en → Set1 | Set1 → en
tok/sec | spBleu (↑) | COMET22 (↑) | spBleu (↑) | COMET22 (↑)
Nova Pro | 100 | 43.4 | 89.1 | 44.4 | 89.0
Nova Lite | 157 | 41.5 | 88.8 | 43.1 | 88.8
Nova Micro | 210 | 40.2 | 88.5 | 42.6 | 88.7
Claude 3.5 Sonnet (Oct) | 57 | 42.5M | 89.4M | 43.5M | 89.1M
Claude 3.5 Haiku | 64 | 40.0M | 88.5M | 40.2M | 88.3M
Gemini 1.5 Pro (002) | 57 | 43.0M* | 89.1M* | 45.6M* | 89.1M*
Gemini 1.5 Flash (002) | 190 | 40.0M* | 88.5M* | 42.9M* | 88.8M*
Gemini 1.5 Flash 8B (001) | 283 | 38.2M* | 88.0M* | 41.4M* | 88.5M*
GPT-4o | 163 | 43.1M* | 89.2M* | 43.9M* | 89.0M*
GPT-4o Mini | 113 | 41.1M* | 88.7M* | 41.9M* | 88.7M*
Llama 3.2 90B | 40 | 39.7M | 88.2M | 43.7M | 88.5M
Llama 3.2 11B | 124 | 33.0M | 85.7M | 36.3M | 86.3M
Llama 3.1 8B | 157 | 32.7M | 85.5M | 36.5M | 86.5M
", + "bbox": [ + 151, + 287, + 848, + 595 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 2: Quantitative results on Flores200 [34], a machine translation benchmark. Set1 refers to {de, es, fr, it, pt, ja, ar, hi, ru, nl, tr, he, ko, zh}. Results marked with $M$ were measured by us. Results marked with an asterisk (*) were obtained using an alternate prompt which can be found in Appendix B.1 Token generation speed in tokens per second (tok/sec), the inverse of per-token generation latency, is reproduced from Section 2.5.", + "bbox": [ + 109, + 599, + 888, + 657 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "2.1.2 Core capability multimodal benchmarks and results", + "text_level": 1, + "bbox": [ + 109, + 688, + 529, + 704 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In this section we evaluate the multimodal capabilities of Amazon Nova models on a diverse set of public benchmarks. Our selection of multimodal benchmarks aims to probe for various capabilities, including natural image understanding, document understanding with charts and graphs, text understanding, and temporal reasoning in videos. For all benchmarks, we follow the suggested metrics and choice of data split for evaluation. The following list briefly describes the selected benchmarks.", + "bbox": [ + 109, + 712, + 887, + 781 + ], + "page_idx": 6 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- MMMU [85]: The Massive Multi-discipline Multimodal Understanding benchmark consists of college-level multiple-choice and open-ended questions from 30 different disciplines. We use Chain-of-Thought (CoT) prompting for this benchmark and report accuracy.", + "ChartQA [50]: The 2,500 questions of this benchmark cover three different types of charts (bar, line and pie) and require strong visual, logical, and arithmetical reasoning capabilities. We evaluate on the test set and report relaxed accuracy.", + "- DocVQA [51]: This benchmark probes capabilities on document analysis and recognition, including Optical Character Recognition (OCR). The 5,349 questions contain images from a diverse set of documents, ranging" + ], + "bbox": [ + 156, + 791, + 883, + 912 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "The Amazon Nova Family of Models", + "bbox": [ + 370, + 42, + 620, + 56 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 490, + 935, + 501, + 946 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/499a56f552f88cdf2d3fa5cc1b35e5ff796ec6798d845b6bc78b4780986c4b50.jpg", + "table_caption": [], + "table_footnote": [ + "Table 3: Quantitative results on four image understanding benchmarks (MMMU [85], ChartQA [50], DocVQA [51], TextVQA [70]) and 2 video understanding benchmarks (VATEX [78] and EgoSchema [49]). Higher numbers are better for all benchmarks $(\\uparrow)$ . Unless otherwise noted, all evaluations are 0-shot and reference numbers are taken from the original technical reports and websites for Claude models [11, 12], GPT4 models [56, 55], Llama models [45, 53] and Gemini models [32, 33]. Remarks: (A) 4-shot evaluation; (B) External Optical Character Recognition (OCR) was used; (C) All models except Amazon Nova use CoT; (D) GPT-4o (Nov); (E) Gemini 1.5 Flash/Pro (002) models; (F) Reported in [33]; (G) Reported in [4]; (M) Claude 3.5 Sonnet and Llama 3.2 results for TextVQA as well as GPT4o and GPT4o mini results on ChartQA, TextVQA and VATEX were measured by us. 
Token generation speed in tokens per second (tok/sec), the inverse of per-token generation latency, is reproduced from Section 2.5." + ], + "table_body": "
MMMU (CoT) | Chart QA (C) | Doc VQA | Text VQA | VATEX | Ego Schema
tok/sec | val | test | test | val | test | test
accuracy | relaxed accuracy | ANLS | weighted accuracy | CIDEr | accuracy
Amazon Nova Pro | 100 | 61.7 ±3.2 | 89.2 ±1.2 | 93.5 | 81.5 | 77.8 | 72.1 ±5.4
Amazon Nova Lite | 157 | 56.2 ±3.2 | 86.8 ±1.3 | 92.4 | 80.2 | 77.8 | 71.4 ±5.4
Claude 3.5 Sonnet (Oct) | 57 | 70.4 ±3.0 | 90.8 ±1.1 | 94.2 | 61.7M | - | -
Claude 3 Haiku | 64 | 50.2 ±3.3 | 82.0 ±1.5 | 88.8 | - | - | -
Gemini 1.5 Pro (001) | 58 | 65.9 ±3.1E | 87.2 ±1.3 | 93.1B | 78.7 | 64.6A | 72.2 ±5.4
Gemini 1.5 Flash (001) | 190 | 62.3 ±3.2E | 85.4 ±1.4 | 89.9B | 78.7 | 57.1 | 65.7 ±5.7
Gemini 1.5 Flash 8B (001) | 283 | 53.7 ±3.3F | 78.2 ±1.6G | 73.6 | 66.7 | 53.2A | -
GPT-4o (May) | - | 69.1 ±3.0 | 85.7 ±1.4 | 92.8 | 77.2DM | - | 72.2 ±5.4
GPT-4o Mini (Jul) | 113 | 59.4 ±3.2 | 79.2 ±1.6M | - | 70.3M | - | -
Llama 3.2 90B | 40 | 60.3 ±3.2 | 85.5 ±1.4 | 90.1 | 80.7M | - | -
Llama 3.2 11B | 124 | 50.7 ±3.3 | 83.4 ±1.5 | 88.4 | 71.3M | - | -
", + "bbox": [ + 114, + 87, + 883, + 371 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "from 1940 to 2020 and covering multiple industries. We report Average Normalized Levenshtein Similarity (ANLS).", + "bbox": [ + 169, + 537, + 883, + 565 + ], + "page_idx": 7 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- TextVQA [70]: The 5,000 samples of this dataset focus specifically on text-reading capabilities (OCR) in natural images. We report weighted accuracy on the validation set.", + "- VATEX [78]: This video captioning benchmark covers a diverse set of human activities. We evaluate on the public test set containing videos with a length of around 10 seconds. The CIDEr [75] score is used for evaluation.", + "- EgoSchema [49]: The unique characteristic of this long-form video question answering benchmark is its high \"certificate length\" [15], which is, loosely speaking, the time it takes a human to verify the video description. The videos cover a broad range of natural human activities and come with human-curated multiple-choice question-answer pairs." + ], + "bbox": [ + 156, + 571, + 880, + 709 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 3 summarizes our quantitative results on multiple image and video understanding benchmarks. Amazon Nova Pro and Lite achieve high scores across all benchmarks. Chart understanding on ChartQA and video understanding on VATEX stand out, where Nova models rank either first or second. We provide the prompt templates for all benchmarks in Appendix B.2, as well as qualitative examples in Appendix C.", + "bbox": [ + 109, + 720, + 883, + 779 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "2.2 Agentic workflows", + "text_level": 1, + "bbox": [ + 112, + 794, + 282, + 809 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Amazon Nova Pro, Lite, and Micro models can be used as agents. An agent considers a suite of tools and APIs, reasons about the user's request and past conversational history, chooses if a tool should be used and, if so, decides which tool to use, invokes the tool, assesses the outcome from the tool, and then communicates back with the user [83, 67, 46, 60].", + "bbox": [ + 109, + 821, + 883, + 864 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "To this end, we evaluated our Nova models on agentic workflows that require textual understanding and visual reasoning. For textual understanding (Section 2.2.1), we used the Berkeley Function Calling Leaderboard benchmark to test our models' capabilities in function calling and orchestrating real-world applications. For visual reasoning (Section 2.2.2),", + "bbox": [ + 109, + 869, + 883, + 912 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "The Amazon Nova Family of Models", + "bbox": [ + 370, + 42, + 620, + 56 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 490, + 935, + 501, + 946 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "we evaluate on three benchmarks that require image understanding capabilities for correct function calling. 
We highlight that both Amazon Nova Pro and Lite models set a new state of the art on these challenging benchmarks.", + "bbox": [ + 109, + 90, + 883, + 122 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "2.2.1 Agentic text benchmarks and results", + "text_level": 1, + "bbox": [ + 112, + 136, + 421, + 150 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Table 4 presents quantitative results on the Berkeley Function Calling Leaderboard v3 (BFCL).3 Stemming from the Gorilla project [60], the revamped BFCL [81] benchmark evaluates a model's ability to accurately call and utilize real-world functions, or tools, based on a user's natural language request. Amazon Nova models particularly excel in the Abstract Syntax Tree (AST), Execution, and Relevance metrics, as well as overall scores versus comparable models. Amazon Nova Lite and Micro also had the lowest latency of the selected models.", + "bbox": [ + 109, + 159, + 883, + 231 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In Table 4, AST measures the exact match function calling performance of the model when comparing function names and argument/value signatures to a human-curated ground truth. While AST allows for some soft matching based on manually-defined, permitted argument values (e.g., different date formats), Execution measures a function call's accuracy not by the call signature itself, but by comparing the return value of the call when executed against a real API.", + "bbox": [ + 109, + 234, + 883, + 292 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "To measure the rate of hallucination, Irrelevance measures the model's ability to recognize that it does not have the appropriate functions available to help the user, and should therefore not call any. Relevance, as the opposite of irrelevance, measures the model's ability to recognize it indeed does have the functions necessary to help the user (but does not verify function signature accuracy). For both metrics, higher numbers are better.", + "bbox": [ + 109, + 297, + 883, + 354 + ], + "page_idx": 8 + }, + { + "type": "table", + "img_path": "images/4c9b6e696a10b010149f3053995de7bcfebf5ebbcc7d50857945ce6d01b19f02.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Overall | Latency | Non-Live | Live | Multi-Turn | Hallucination
accuracy (↑) | seconds (↓) | AST (↑) | execution (↑) | overall (↑) | overall (↑) | relevance (↑) | irrelevance (↑)
Nova Pro | 68.4 | 1.0 | 90.1 | 89.8 | 71.5 | 45.1 | 95.1 | 65.1
Nova Lite | 66.6 | 0.6 | 87.5 | 86.4 | 66.0 | 50.3 | 97.6 | 49.1
Nova Micro | 56.2 | 0.5 | 87.2 | 89.7 | 67.4 | 15.5 | 87.8 | 57.6
Claude Sonnet 3.5 (Jun) | 61.3 | 3.9 | 70.0 | 66.3 | 74.7 | 40.0 | 68.3 | 74.6
Claude Haiku 3 | 40.4 | 1.5 | 41.7 | 47.5 | 57.7 | 20.6 | 97.6 | 29.4
Gemini 1.5 Pro (002) | 59.8 | 3.0 | 88.0 | 91.4 | 74.3 | 16.3 | 75.6 | 75.1
Gemini 1.5 Flash (002) | 55.3 | 1.1 | 79.7 | 80.6 | 73.2 | 12.5 | 78.1 | 75.7
Llama 3.2 90BA | 54.3 | N/A | 88.9 | 89.3 | 61.1 | 14.3 | 92.7 | 58.4
Llama 3.2 11BA | 49.9 | N/A | 83.6 | 87.3 | 57.9 | 10.5 | 78.1 | 41.6
GPT-4o (Aug) | 68.9 | 1.5 | 85.9 | 85.6 | 75.4 | 45.3 | 63.4 | 82.9
GPT-4o-mini (Jul) | 60.7 | 1.6 | 84.3 | 84.1 | 70.2 | 28.3 | 80.5 | 71.8
", + "bbox": [ + 114, + 367, + 885, + 646 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Table 4: Results on the Berkeley Function Calling Leaderboard (BFCL) v3 as of the Nov 17th, 2024 update. We include the latest versions of the models available on the leaderboard at that time. (A) We use leaderboard results for Llama 3.1 8B and 70B for Llama 3.2 11B and 90B, respectively, given the shared text LLM.", + "bbox": [ + 109, + 648, + 883, + 691 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "2.2.2 Agentic multimodal benchmarks and results", + "text_level": 1, + "bbox": [ + 112, + 726, + 477, + 742 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "The Amazon Nova Pro and Lite models provide native support for multimodal inputs, including agentic workflows. In this section, we present results from our models on three different benchmarks that require agents to navigate websites to solve real-world tasks. Websites are typically represented as screenshots in these datasets to correctly convey all style elements and visual data as rendered in a standard web browser.", + "bbox": [ + 109, + 750, + 883, + 808 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "- VisualWebBench [43]: This benchmark includes seven core tasks related to web browsing, including captioning, question answering, OCR, action prediction, and grounding. All models are evaluated on 1,536 samples that span more than 100 websites from 12 domains. The final metric is the average over different metrics for the individual core tasks.", + "bbox": [ + 156, + 818, + 883, + 875 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "The Amazon Nova Family of Models", + "bbox": [ + 370, + 42, + 620, + 56 + ], + "page_idx": 8 + }, + { + "type": "page_footnote", + "text": "${}^{3}$ BFCL is a fast-moving, live benchmark. We report results using the state of the repository and website leaderboard as of Nov 17th, 2024 (commit 8226d).", + "bbox": [ + 112, + 883, + 883, + 912 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 490, + 935, + 501, + 946 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- MM-Mind2Web [86]: This extension of the original Mind2Web [24] benchmark links samples with the original website screenshots, making it multimodal. An agent needs to select an element and pick one of three elementary actions (click, type, or select) alongside a value for some actions. We report micro average over the per-sample step accuracy, where an agent is successful only if element and action selection, as well as the predicted value, are correct.", + "- GroundUI-1K [87]: This benchmark is composed of multiple existing datasets, including Mind2Web [24] and repurposes them as a grounding task. On 1,000 samples for evaluation, a multimodal agent is given an instruction and a screenshot of a website from a wide variety of domains and asked to predict the 2D location of the desired UI element. The agent is correct if its predicted 2D location is within the ground truth bounding box." + ], + "bbox": [ + 156, + 90, + 879, + 231 + ], + "page_idx": 9 + }, + { + "type": "table", + "img_path": "images/7dad23d08aedb09972c224decfe86591bd5b6b4bfd30999e7dea4dd1df54b9ab.jpg", + "table_caption": [ + "Table 5 shows the results of our models on multimodal agent workflows along with other publicly-reported results. 
Both Amazon Nova models, Lite and Pro, demonstrate strong visual reasoning and agentic capabilities and achieve high scores on all three benchmarks." + ], + "table_footnote": [], + "table_body": "
VisualWebBench composite (D) | MM-Mind2Web step accuracy | GroundUI-1K accuracy
Nova Pro | 79.7 | 63.7 | 81.4
Nova Lite | 77.7 | 60.7 | 80.2
Claude 3.5 Sonnet (Oct) | 76.7M | 61.6M | 16.3
GPT-4o (Nov) | 77.5M | 55.0M | 13.4C
GPT-4o Mini (Jul) | 71.3M | 58.6M | 7.2M
GPT-4 (Apr) | 64.6 | 36.8A | -
Gemini 1.5 Pro (002) | 76.4M | 58.4M | 35.2B
Gemini 1.5 Flash (002) | 76.1M | 46.2M | 59.9M
Gemini 1.0 Pro (001) | 48.0 | 17.9A | -
Llama 3.2 90B | 73.2M | 21.6M | 8.3M
Llama 3.2 11B | 65.1M | 22.1M | 3.7M
", + "bbox": [ + 189, + 294, + 805, + 547 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Table 5: Quantitative results on three multi-modal agentic benchmarks: VisualWebBench [43], MM-Mind2Web [86] and GroundUI-1K [87]. Reference numbers are taken from the corresponding benchmark papers [43, 86, 87] and leaderboard [3]. Remarks: (A) uses in-context learning (ICL) (please note that Amazon Nova models do not need to rely on in-context examples); (B) Gemini 1.5 Pro (001); (C) GPT-4o (May); (D) Macro average over individual metrics; (M) Measured by us.", + "bbox": [ + 109, + 551, + 883, + 619 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "2.3 Long context", + "text_level": 1, + "bbox": [ + 112, + 652, + 246, + 667 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "We evaluate Amazon Nova Pro, Lite, and Micro on tasks that require the models to understand and reason over long context. These skills are crucial for tasks such as long multi-turn conversations, reasoning over long lists of retrieved documents, or understanding long videos. Amazon Nova Micro, Lite, and Pro models support context lengths of 128k, 300k, and 300k tokens, respectively. We used the following benchmarks to evaluate our models' long context performance:", + "bbox": [ + 109, + 676, + 883, + 748 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Text Needle-in-a-Haystack (NIAH): Following [40], we assessed each model's ability to locate specific information (the \"needle\") within extensive contexts (the \"haystack\"). This \"needle-in-a-haystack\" test evaluates the model's performance on context lengths starting at $32\\mathrm{k}$ , allowing us to measure its ability to accurately retrieve information across varying lengths of input context.", + "- SQuALITY [76] (ZeroScrolls Benchmark [69]): Focused on query-based summarization of literary stories, this task evaluates the model's capacity to generate relevant summaries from large contexts.", + "- LVBench [77]: This multimodal benchmark includes questions about YouTube videos from various domains such as TV series, sports, broadcasts, and surveillance footage. The LVBench dataset consists of 99 videos and 1,549 questions, covering six different types of tasks such as reasoning, event understanding and summarization." + ], + "bbox": [ + 156, + 756, + 880, + 887 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "The Amazon Nova Family of Models", + "bbox": [ + 370, + 42, + 620, + 56 + ], + "page_idx": 9 + }, + { + "type": "page_footnote", + "text": "4https://huggingface.co/datasets/AIWinter/LVBench", + "bbox": [ + 133, + 896, + 514, + 911 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 486, + 935, + 504, + 946 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/a575838c54ef1139078534f10109c9a2c9a9db02f73565857edd5970bcc3d3d6.jpg", + "image_caption": [ + "Figure 2: Text Needle-in-a-Haystack recall performance for Nova Micro (up-to 128k), Nova Lite (up-to 300k) and Nova Pro (up-to 300k) models." 
+ ], + "image_footnote": [], + "bbox": [ + 215, + 95, + 339, + 324 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/62810bb4a4f0c1a6ec5b253cc0bdfe5416772d4bd04d5e56463fb15a82e82c78.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 357, + 95, + 524, + 324 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/0cbd85126bd687b64f349529061c5f9e6d085266463731bb8ec5d4319a9c86c7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 558, + 95, + 769, + 324 + ], + "page_idx": 10 + }, + { + "type": "table", + "img_path": "images/3812b6443d8505c899375917bc3608643f3f56d03d8761c7851c7a38dc3c85ce.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
SQuALITY ROUGE-L | LVBench accuracy
Nova Pro | 19.8 ±8.7 | 41.6 ±2.5
Nova Lite | 19.2 ±8.6 | 40.4 ±2.4
Nova Micro | 18.8 ±8.6 | -
Claude 3.5 Sonnet (Jun) | 13.4 ±7.5 | -
Gemini 1.5 Pro (001) | - | 33.1 ±2.3
Gemini 1.5 Pro (002) | 19.1 ±8.6M | -
Gemini 1.5 Flash (002) | 18.1 ±8.4M | -
GPT-4o | 18.8 ±8.6 | 30.8 ±2.3
Llama 3 - 70B | 16.4 ±8.1 | -
Llama 3 - 8B | 15.3 ±7.9 | -
", + "bbox": [ + 267, + 410, + 733, + 645 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Table 6: Text and Multimodal long context performance on SQuALITY (ROUGE-L) and LVBench (Accuracy). For SQuALITY, measurements for Claude 3.5 Sonnet, GPT-4o, Llama 3 70B and Llama 3 8B are taken from the Llama 3 report [45]. Gemini results were measured by $\\mathrm{us}^2$ ( $M$ ). For LVBench, Gemini and GPT-4o numbers were taken from the corresponding benchmark leaderboard [77].", + "bbox": [ + 111, + 647, + 883, + 705 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Results for text and multimodal long context benchmarks are presented in Table 6. In the long video question answering task, both Amazon Nova Pro and Lite demonstrate robust performance on the LVBench dataset, surpassing other models. Amazon Nova models consistently demonstrate exceptional performance in retrieving information from any depth across both text and multimodal understanding use cases, delivering high accuracy and reliability.", + "bbox": [ + 111, + 739, + 883, + 797 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "2.4 Functional expertise", + "text_level": 1, + "bbox": [ + 112, + 825, + 295, + 840 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "In addition to core capabilities, foundation models must perform well in particular specialties and domains. Across our many areas of performance analyses, we have selected four domains for which to present benchmarking results: Software engineering, financial analysis, and retrieval-augmented generation. Prompt templates for all benchmarks can be found in Appendix B.3.", + "bbox": [ + 111, + 854, + 883, + 912 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "The Amazon Nova Family of Models", + "bbox": [ + 370, + 42, + 620, + 56 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 485, + 935, + 503, + 946 + ], + "page_idx": 10 + }, + { + "type": "table", + "img_path": "images/4925649280fa45f14562e7b6baa58f4947d95e95c004bfe6d7380dc0f925a1f9.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Software | Finance | RAG
HumanEval Python | FinQA | CRAG
tok/sec | 0-shot pass@1 | 0-shot accuracy | accuracy
Nova Pro | 100 | 89.0 ±4.8 | 77.2 ±0.9 | 50.3 ±1.9
Nova Lite | 157 | 85.4 ±5.4 | 73.6 ±0.9 | 43.8 ±1.9
Nova Micro | 210 | 81.1 ±6.0 | 65.2 ±1.0 | 43.1 ±1.9
Claude 3.5 Sonnet (Oct) | 57 | 93.7 ±3.7 | 77.3 ±0.9M | 52.6 ±1.8M
Claude 3.5 Haiku | 64 | 88.1 ±5.0 | 73.9 ±0.9M | 31.9 ±1.8M
Gemini 1.5 Pro (002) | 58 | 87.8 ±5.0M | 74.4 ±0.9M | 48.9 ±1.9M
Gemini 1.5 Flash (002) | 190 | 81.1 ±6.0M | 73.5 ±1.0M | 42.4 ±1.9M
Gemini 1.5 Flash 8B (001) | 283 | 81.1 ±6.0M | 63.7 ±1.0M | 37.7 ±1.8M
GPT-4o | 163 | 90.2 ±4.6 | 71.1 ±1.0M | 52.0 ±1.9M
GPT-4o Mini | 113 | 87.2 ±5.1 | 70.6 ±1.0M | 49.9 ±1.9M
Llama 3.2 90B | 40 | 80.5 ±6.1 | 72.8 ±1.0M | 45.2 ±1.9M
Llama 3.2 11B | 124 | 72.6 ±6.8 | 60.8 ±1.1M | 42.2 ±1.9M
Llama 3.1 8B | 157 | 72.6 ±6.8 | 61.2 ±1.0M | 42.2 ±1.8M
", + "bbox": [ + 189, + 88, + 808, + 425 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Table 7: Performance on select functional benchmarks, including software engineering benchmarks in Python with HumanEval [19], financial reasoning with FinQA [20], and retrieval augmented generation with CRAG [82]. CRAG uses our scoring method described in Section 2.4.3. Where available, reference numbers are taken from the corresponding benchmark papers and technical reports [13, 11, 32, 39, 45, 58]. Additional results were measured $(M)$ by $\\mathrm{us}^2$ . Model speed in tokens per second (Tok/Sec) is reproduced from section 2.5.", + "bbox": [ + 109, + 428, + 883, + 500 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "2.4.1 Software engineering", + "text_level": 1, + "bbox": [ + 112, + 540, + 316, + 555 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "We assessed Amazon Nova's code generation capabilities on the Python coding task HumanEval [19]. The benchmark contains 164 original programming problems with unit tests. These problems assess language comprehension, algorithms, and simple mathematics. Some problems are comparable to simple software interview questions. Table 7 provides the performance of our Nova models and select public models.", + "bbox": [ + 109, + 565, + 883, + 625 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "2.4.2 Financial analysis", + "text_level": 1, + "bbox": [ + 112, + 643, + 292, + 657 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "We use FinQA [20] to evaluate Amazon Nova's ability to understand financial data. FinQA is an expert-annotated dataset comprising 8,281 financial question-answer pairs derived from the earnings reports of S&P 500 companies. It evaluates a model's ability to extract information from both tables and unstructured text, while accurately performing calculations using relevant financial knowledge. We report the average post-rounding accuracy under the 0-shot CoT setting. Table 7 provides the performance of Amazon Nova models and select public models on FinQA.", + "bbox": [ + 109, + 669, + 883, + 741 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "2.4.3 Retrieval augmented generation", + "text_level": 1, + "bbox": [ + 112, + 760, + 390, + 776 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "We evaluate RAG capabilities on the CRAG [82] benchmark using the Task 1 setup, which considers five pre-selected HTML pages as external knowledge to each input question. We extract top-20 text snippets from these pages following the standard retrieval approach used in CRAG's official repository, whereby pages are first cleaned using BeautifulSoup to remove HTML tags, after which the text is then split into sentences or chunks no longer than 1000 characters. These are then encoded using the sentence-transformers/all-MiniLM-L6-v2 model, which is also used to encode the question. The top 20 chunks with highest similarity are passed as context in the input for model inference. We report the percentage of correct responses as judged by an LLM (gpt-4-turbo-2024-04-09), which compares each model's answer with the expected answer using the prompt shown in Appendix B.3.2. 
Table 7 provides the performance of Amazon Nova models and selected public models on a combined validation and test set of 2,706 examples.", + "bbox": [ + 109, + 786, + 883, + 912 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "The Amazon Nova Family of Models", + "bbox": [ + 370, + 42, + 620, + 56 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 486, + 935, + 504, + 946 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "2.5 Runtime performance", + "text_level": 1, + "bbox": [ + 112, + 90, + 308, + 106 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "We evaluate the runtime performance of Amazon Nova models using three metrics: Time to First Token (TTFT), Output Tokens per Second (OTPS) and Total Response Time. TTFT is measured as the time, in seconds, it takes to receive the first token from the model after an API request is sent. OTPS is measured as the number of tokens generated per second (tok/sec). It is the rate at which a model produces subsequent output tokens after the first token, reflecting overall throughput and efficiency during inference. Total Response Time measures the total duration in seconds from the submission of the input prompt to the end of generation sequence for a given input-output prompt length. It represents the overall user experience for a model.", + "bbox": [ + 109, + 116, + 883, + 214 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "In Figure 3, we show TTFT, OTPS, and Total Response Time using 1000 tokens of input and 100 tokens of output for Amazon Nova models and select public models as reported by Artificial Analysis5, an independent entity that benchmarks AI models and hosting providers. Amazon Nova Micro, Lite and Pro models are among the fastest models in their respective intelligence tiers. Together, all three Amazon Nova models demonstrate state-of-the-art runtime performance, ensuring a smooth and responsive user experience in many real world use cases.", + "bbox": [ + 109, + 220, + 883, + 291 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "The Amazon Nova Family of Models", + "bbox": [ + 370, + 42, + 619, + 56 + ], + "page_idx": 12 + }, + { + "type": "page_footnote", + "text": "5https://artificialanalysis.ai/methodology", + "bbox": [ + 133, + 896, + 460, + 911 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 486, + 935, + 504, + 946 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/e774b73fa1e735c2e5327408e7138bc5a659ab582e779f6af3b14f94864a0daa.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 151, + 145, + 844, + 378 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/f66f29a61daabbd25d85f8a0f81690ced064d2fb9861e120cc24a4a32227f13c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 150, + 377, + 841, + 588 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/4754f9755877defa3dbe1fd9cd96eb21940d26e1151935c60631550c46e02dc5.jpg", + "image_caption": [ + "Figure 3: Time to First Token $(\\downarrow)$ , Output Tokens per Second $(\\uparrow)$ , and Total Response Time $(\\downarrow)$ using 1,000 tokens of input and 100 tokens of output for Amazon Nova models and select publicly-available models (Artificial Analysis, Nov 29th, 2024)." 
+ ], + "image_footnote": [], + "bbox": [ + 153, + 590, + 841, + 801 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "The Amazon Nova Family of Models", + "bbox": [ + 370, + 42, + 620, + 56 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 486, + 935, + 504, + 946 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "3 Amazon Nova Canvas Evaluation", + "text_level": 1, + "bbox": [ + 112, + 89, + 426, + 104 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Amazon Nova Canvas is a diffusion model that takes a text prompt and an optional RGB image as input and generates an image as an output conditioned on the input text and optional image. Illustrative examples of the images generated by Amazon Nova Canvas can be found in our Amazon Science blog post $^{6}$ . In this section, we provide details on the evaluation strategy and performance of the model both in terms of automated metrics and human evaluation.", + "bbox": [ + 109, + 119, + 883, + 176 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "3.1 Automated metrics", + "text_level": 1, + "bbox": [ + 112, + 191, + 287, + 205 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "We use ImageReward [80] and Text-to-Image Faithfulness (TIFA) [38] as automated metrics.", + "bbox": [ + 109, + 217, + 723, + 232 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- ImageReward score is generated from a standardized reward model that aligns human preference with the predicted score. To compute the ImageReward score, we randomly sample 10k prompts from MSCOCO2014 [42] validation set and use this set for calculating the score.", + "- Text-to-Image Faithfulness (TIFA) score is a reference-free metric that measures the faithfulness of a generated image to the input text via visual question answering (VQA). The evaluation set for TIFA score is a pre-selected 4k prompts in the TIFA-v1.0 benchmark, sampled from MSCOCO captions [42], DrawBench [66], PartiPrompts [84], and PaintSkill [21] datasets." + ], + "bbox": [ + 156, + 243, + 883, + 347 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "We compare Amazon Nova Canvas with other publicly-available models including DALL.E 3 [16], Stable Diffusion 3 Medium [27], Stable Diffusion 3.5 Large [28] and Flux (Schnell and Pro) [17]. The results are shown in Table 8.", + "bbox": [ + 109, + 356, + 883, + 386 + ], + "page_idx": 14 + }, + { + "type": "table", + "img_path": "images/1911bf0acd67bd15d0f2b53ab74ed3db72d3ea068b54888f5c0238b981b3c475.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Model | TIFA | ImageReward
Amazon Nova Canvas | 0.897 | 1.250
DALL.E 3 | 0.863 | 1.052
Stable Diffusion 3.5 Large | 0.891 | 1.082
Stable Diffusion 3 Medium | 0.881 | 0.952
Flux Pro 1.0 | 0.875 | 1.075
Flux Schnell | 0.882 | 0.999
", + "bbox": [ + 308, + 397, + 687, + 534 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Table 8: Comparison of TIFA and ImageReward metrics of Amazon Nova Canvas with other models.", + "bbox": [ + 163, + 537, + 830, + 551 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "3.2 Human evaluation", + "text_level": 1, + "bbox": [ + 112, + 585, + 282, + 599 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "We conduct A/B testing to compare Amazon Nova Canvas with other third-party text-to-image models. The A/B testing prompt set is composed of approximately 1,000 prompts designed to capture customer usage of text-to-image models. This set includes prompts from datasets such as MSCOCO [42], Drawbench [66], OpenParti [84], DALL.E 3 Eval [16], and DOCCI [54] and covers a broad set of categories such as humans, landscapes, natural scenarios, indoor environments, creative themes, artistic themes, and so forth. A few prompts were randomly selected and repeated in order to get additional data points on the quality of the model.", + "bbox": [ + 109, + 611, + 883, + 696 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "With each prompt we generate an image from Amazon Nova Canvas as well as each other text-to-image model. We used random seeds to generate the images from Amazon Nova Canvas and all images were generated at $1\\mathrm{k}\\times 1\\mathrm{k}$ resolution. If the prompts trigger filters such that an image is not generated, for either the Amazon Nova Canvas model or the public text-to-image model, we ignore that prompt and do not show it to the human raters. All human evaluation is done in a single-blind manner where the annotator is provided two sets of images, one from Amazon Nova Canvas and the other from the third-party model. The order of the images are randomized for each prompt and annotator. In our blind testing, we ask human annotators to select images that they prefer based on (1) text-image alignment, which measures the instruction-following capability of the model, and (2) image quality, which quantifies the overall preference of the annotators. To ensure rigorous, consistent, and unbiased evaluation, we used a third-party vendor for human evaluation. We created guidelines that were used to train the annotators so that the decision-making criteria were clear to them in each dimension.", + "bbox": [ + 109, + 700, + 883, + 854 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "The pair-wise results comparing Amazon Nova Canvas with OpenAI DALL.E 3 and Google Imagen 3 are shown in Table 9, including win, tie, loss rate. The win rate reflects the percentage of samples where Amazon Nova Canvas was", + "bbox": [ + 109, + 859, + 883, + 888 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "The Amazon Nova Family of Models", + "bbox": [ + 370, + 42, + 620, + 56 + ], + "page_idx": 14 + }, + { + "type": "page_footnote", + "text": "$^{6}$ https://www.amazon.science/blog/amazon-nova-canvas-examples", + "bbox": [ + 132, + 897, + 602, + 911 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 486, + 935, + 504, + 946 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "preferred over the other model while the tie rate indicates the scenario where the human annotator did not perceive a difference between the two models. 
As can be seen in the results, Amazon Nova Canvas has a higher win rate compared to the other text-to-image models.", + "bbox": [ + 109, + 90, + 883, + 133 + ], + "page_idx": 15 + }, + { + "type": "table", + "img_path": "images/24cd5ce105f84b663074971e3ad6ce5928eb157a204bca249b33984a69b84712.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Nova Canvas versus: | DALL.E 3 (win / tie / loss) | Imagen 3 (win / tie / loss)
Overall preference (image quality) | 54.5 / 6.4 / 39.1 | 48.2 / 5.3 / 46.5
Instruction following (text-image alignment) | 39.4 / 22.5 / 38.1 | 38.4 / 28.1 / 33.5
", + "bbox": [ + 125, + 145, + 870, + 227 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Table 9: The win, tie, and loss rates (%) from human evaluation of Amazon Nova Canvas versus (a) DALL.E 3 and (b) Imagen 3.", + "bbox": [ + 111, + 229, + 883, + 260 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "4 Amazon Nova Reel Evaluation", + "text_level": 1, + "bbox": [ + 111, + 297, + 403, + 313 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Amazon Nova Reel is a diffusion model that takes a text prompt and an optional RGB image as input and generates a video as an output conditioned on the input text and optional image. Illustrative examples of the videos generated by the Amazon Nova Reel can be found in our Amazon Science blog post.7 In this section, we provide details on the evaluation strategy and performance of the model.", + "bbox": [ + 109, + 328, + 883, + 383 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "4.1 Human evaluation metrics", + "text_level": 1, + "bbox": [ + 111, + 400, + 339, + 414 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "To evaluate Amazon Nova Reel, we rely on human feedback to assess the generated videos across two primary axes: video quality and video consistency. All evaluations are conducted through single-blind pairwise comparisons. Human annotators are provided a set of two videos shown side-by-side and are asked to choose the better video or mark them as equal if they find the videos to be equally performant across the metric on which they are evaluating. All videos were generated in 720p resolution and different random seeds were used during generation.", + "bbox": [ + 109, + 425, + 885, + 496 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "The video quality axis encapsulates the technical and perceptual aspects of the generated video via four primary components:", + "bbox": [ + 109, + 501, + 883, + 531 + ], + "page_idx": 15 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Image quality: The visual appeal of individual frames, including resolution, sharpness, object clarity, and overall composition, where each frame is visually pleasing and artifact-free.", + "- Motion quality: The fluidity of movement across frames, including motion consistency and smooth transitions without flickering, distortion, or abrupt shifts, contributing to natural and realistic motion portrayal.", + "- Image-text alignment: How closely individual frames match the prompt, considering the presence of described entities, their attributes, spatial relationships, colors, and other static visual details.", + "- Motion-text alignment: The accuracy of dynamic elements, including the correctness of actions performed by entities, camera movements, and temporal changes in attributes, as well as adherence to the provided description." + ], + "bbox": [ + 156, + 541, + 879, + 680 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "The video quality axis additionally includes factors influencing overall appeal, such as motion degree, entity size, creative composition, and general video likability.", + "bbox": [ + 109, + 691, + 883, + 722 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "The video consistency axis encapsulates the temporal coherence of both subjects and backgrounds throughout the video. It includes assessments of the maintenance of entity size, shape, and appearance, as well as background stability without unexpected morphing or changes. 
A high score in this dimension means believable spatial relationships between foreground and background elements throughout the video duration.", + "bbox": [ + 109, + 726, + 883, + 784 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "In combination, the video quality and video consistency metrics provide a holistic and robust evaluation framework for video generation models by considering both technical accuracy and perceptual appeal.", + "bbox": [ + 109, + 789, + 883, + 819 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "4.2 Dataset", + "text_level": 1, + "bbox": [ + 111, + 833, + 207, + 847 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "We curated a diverse set of prompts designed to capture various aspects of video generation. The prompts are distributed across 6 broad categories: human and activities, animals, natural scenery and landscapes, indoor scenes, objects", + "bbox": [ + 109, + 859, + 883, + 888 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "The Amazon Nova Family of Models", + "bbox": [ + 370, + 42, + 620, + 56 + ], + "page_idx": 15 + }, + { + "type": "page_footnote", + "text": "7https://www.amazon.science/blog/amazon-nova-reel-examples", + "bbox": [ + 133, + 897, + 584, + 911 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 486, + 935, + 504, + 946 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "interactions, and creative scenes and activities. This broad categorization ensures that the evaluation covers a wide range of real-world scenarios. We structured the prompt set to cover various motion-related aspects, which is critical for assessing motion-text alignment in the generated videos. For example, we included prompts with a variety of camera motions to evaluate how well the models follow instructions related to camera movement. Additionally, we incorporated dynamic attributes [71], in which the subject or background undergoes state or shape changes over time, which allows us to evaluate the model's ability to generate evolving entities. Finally, we added prompts that require motion binding [71], where specific compositions of movements and actions are requested, enabling us to assess how well models can generate complex, coordinated motions. The curated prompt set consists of approximately 700 prompts, all from various open source benchmarks.", + "bbox": [ + 109, + 90, + 883, + 217 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "4.3 Implementation details & results", + "text_level": 1, + "bbox": [ + 112, + 232, + 382, + 247 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "To ensure a rigorous, consistent and unbiased evaluation process, we outsourced the annotation collection process to a third-party vendor. We created detailed guidelines, in which annotators were given comprehensive instructions and examples for each evaluation dimension, ensuring clarity on the criteria for marking preferences between videos. These guidelines included examples of different scenarios to aid in decision-making across our evaluation axes. Alongside this, we ensured that annotators were trained using expert-provided examples, with each round of annotations subject to spot checks. Specifically, $5 - 10\\%$ of the data from each batch was randomly selected and reviewed by expert annotators. Based on this feedback, the vendor continuously refined the annotators' understanding and accuracy, ensuring a high standard of evaluation across the board. 
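The win, tie, and loss rates reported in Table 9 above and Table 10 below can be tallied from per-comparison annotator votes. A hypothetical sketch of that tally, assuming three votes per comparison with majority consensus (described in the next paragraph) and a "tie" fallback when all three annotators disagree:

from collections import Counter

def consensus(votes: list[str]) -> str:
    # Majority label among three annotator votes ("win" / "tie" / "loss",
    # from the Nova model's perspective); fall back to "tie" on a 1-1-1 split.
    label, count = Counter(votes).most_common(1)[0]
    return label if count >= 2 else "tie"

def rates(all_votes: list[list[str]]) -> dict[str, float]:
    # Percentage of comparisons resolved to each outcome after consensus.
    outcomes = [consensus(v) for v in all_votes]
    return {k: 100 * outcomes.count(k) / len(outcomes)
            for k in ("win", "tie", "loss")}

# rates([["win", "win", "tie"], ["loss", "tie", "loss"]])
# -> {"win": 50.0, "tie": 0.0, "loss": 50.0}
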
To further enhance the reliability of the results, we employed a consensus voting system. For each video comparison, annotations were collected from three different evaluators, and a majority voting approach was used to determine the final outcome. This method helps reduce individual biases and ensures that the final assessments are based on collective judgment, thereby increasing the robustness of the evaluation.", + "bbox": [ + 109, + 258, + 883, + 411 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "For reporting performance, we conducted pairwise comparisons between Amazon Nova Reel and other state-of-the-art models including Gen3 Alpha [65] by Runway ML and Luma 1.6 [47] by Luma Labs. We report results in terms of win, tie, and loss rates. The win rate reflects the percentage of samples where Amazon Nova Reel was preferred over the other model, while the tie rate indicates cases where no perceptible difference between the two models was found by the evaluators. Using the curated prompt set described earlier, we evaluate the models across all the dimensions outlined above, and report the results in Table 10.", + "bbox": [ + 109, + 416, + 883, + 501 + ], + "page_idx": 16 + }, + { + "type": "table", + "img_path": "images/460ae686ce3b421bcd1418395cc5925da64a59db4ea8972139858cf04c0e9636.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Nova Reel versus: | Runway Gen3 Alpha (win / tie / loss) | Luma 1.6 (win / tie / loss)
Video Quality | 56.4 / 9.9 / 33.7 | 51.1 / 3.4 / 45.5
Video Consistency | 67.0 / 9.1 / 23.9 | 74.7 / 5.1 / 20.2
", + "bbox": [ + 210, + 512, + 787, + 593 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Table 10: The win, tie, and loss rates $(\\%)$ from human evaluation of Amazon Nova Reel versus (a) Gen3-Alpha and (b) Luma1.6.", + "bbox": [ + 109, + 597, + 883, + 625 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "In video consistency, Amazon Nova Reel achieved win rates of $67.0\\%$ against Gen3 Alpha and $74.7\\%$ against Luma 1.6, demonstrating superior subject and background coherence. For video quality, Amazon Nova Reel secured win rates of $56.4\\%$ against Gen3 Alpha and $51.1\\%$ against Luma 1.6.", + "bbox": [ + 109, + 650, + 883, + 694 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "5 Responsible AI", + "text_level": 1, + "bbox": [ + 112, + 712, + 274, + 729 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Our approach to Responsible AI (RAI) is structured around eight foundational dimensions [10] shown in Table 11. These dimensions guide our approach to RAI for the Amazon Nova family of models, which we articulate in the following three sections: (1) defining our RAI design objectives, (2) our actions to ensure adherence to these objectives, and (3) system evaluation and red teaming. The last two components form a continuous loop of model development and human/automated verification to ensure that our Amazon Nova models are aligned with our RAI objectives and deliver an exceptional and delightful customer experience.", + "bbox": [ + 109, + 743, + 883, + 828 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "5.1 Defining our RAI objectives", + "text_level": 1, + "bbox": [ + 112, + 844, + 349, + 859 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "We operationalize our RAI dimensions into a series of detailed design objectives that guide our decision-making throughout the entire model development lifecycle, from initial data collection and pre-training to the implementation of post-deployment runtime mitigations.", + "bbox": [ + 109, + 869, + 883, + 912 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "The Amazon Nova Family of Models", + "bbox": [ + 370, + 42, + 617, + 56 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 486, + 935, + 504, + 946 + ], + "page_idx": 16 + }, + { + "type": "table", + "img_path": "images/55f6921dfdfd6300177137ccb747563b22ab1a28a8d7fd0686e6d8523cfcaf2e.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Term | Definition
Fairness | Considering impacts on different groups of stakeholders
Explainability | Understanding and evaluating system outputs
Privacy and security | Appropriately obtaining, using, and protecting data and models
Safety | Preventing harmful system output and misuse
Controllability | Having mechanisms to monitor and steer AI system behavior
Veracity and robustness | Achieving correct system outputs, even with unexpected or adversarial inputs
Governance | Incorporating best practices into the AI supply chain, including providers and deployers
Transparency | Enabling stakeholders to make informed choices about their engagement with an AI system
", + "bbox": [ + 114, + 88, + 883, + 291 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Table 11: Our eight core Responsible AI dimensions", + "bbox": [ + 323, + 294, + 671, + 309 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "In addition to being grounded on the RAI dimensions, our objectives are informed by relevant laws and regulations, voluntary frameworks, and our commitments to our customers, and they undergo an internal alignment process that includes reviews from a number of stakeholders. We will continue to iterate on these objections as we engage with external experts and participate in industry and government forums, including the Frontier Model Forum [29], Partnership on AI [5], and various forums organized by government agencies such as the National Institute of Standards and Technology (NIST) of the U.S. Department of Commerce [7].", + "bbox": [ + 109, + 349, + 883, + 434 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Our commitment to Responsible Scaling: As the capabilities of AI models increase (through increased training data, model size or architecture innovations), so do the potential risks that they present. We joined other technology companies in signing on to the White House's voluntary commitments on the safe, secure, and transparent development and use of foundation models [6]. Since then we have actively participated in other efforts, including the AI Safety Summits in the UK and Seoul, and we have committed to new standards like the G7 AI Hiroshima Process Code of Conduct [30] in accordance with our commitment to the US White House on ensuring Safe, Secure, and Trustworthy Development and Use of Artificial Intelligence. We also started a partnership with the Model Evaluation and Threat Research (METR) center8 to enrich our Controllability design objectives.", + "bbox": [ + 109, + 453, + 883, + 565 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "5.2 Ensuring adherence to RAI objectives", + "text_level": 1, + "bbox": [ + 112, + 585, + 419, + 602 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "We employed a number of methods to measure and ensure compliance for each of our core RAI dimensions depending on their scope (i.e., whether they apply to model output, data management or other processes). For the dimensions that govern model behavior (Safety, Fairness, Veracity and Robustness, Controllability, and Privacy and Security), we curated the pre-training data and we used both Supervised Fine Tuning (SFT) and Reinforcement Learning from Human Feedback (RLHF) methods to align our models. Based on the objectives for each RAI dimension, we created single- and multi-turn RAI demonstrations in multiple languages and conducted helpfulness/harmfulness studies to decide on SFT data mixes. We collected human preference data to be used as inputs to RLHF training where we also provided an RAI-specific reward model. We also identify risk areas during our offline evaluation or red teaming exercises (Section 5.4) and collect semantically similar examples to be included in future SFT and RLHF rounds.", + "bbox": [ + 109, + 614, + 883, + 739 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "In addition to the RAI model alignment, we built runtime input and output moderation models which serve as a first and last line of defense and allow us to respond more quickly to newly identified threats or gaps in model alignment. 
The main role of the input moderation model is to detect prompts that contain malicious, insecure or illegal material, or attempt to bypass the core model alignment (prompt injection, jailbreaking). Similarly, the output moderation ensures that the content adheres to our RAI objectives.", + "bbox": [ + 109, + 744, + 883, + 816 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "We have a rigorous Governance methodology, developing our models in a working-backwards product process that incorporates RAI at the design phase, design consultations and implementation assessments by dedicated RAI science and data experts, and includes routine testing, reviews with customers, best practice development, dissemination, and training.", + "bbox": [ + 109, + 821, + 883, + 878 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "The Amazon Nova Family of Models", + "bbox": [ + 370, + 42, + 620, + 56 + ], + "page_idx": 17 + }, + { + "type": "page_footnote", + "text": "8https://metr.org/", + "bbox": [ + 133, + 896, + 274, + 911 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 486, + 935, + 504, + 946 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "We work to ensure that our Privacy and Security objectives are adhered to for both the model and training data. In addition to the model output alignment described above, we take measures that include data access controls [9] protecting our model training data, resulting weights, and model versions, and watermarking model outputs (see below). We address the latter through several layers of defense, including de-identifying or removing certain types of personal data from our training data, when feasible, as well as evaluation through red teaming exercises that cover data privacy assessments.", + "bbox": [ + 109, + 90, + 883, + 174 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "For Explainability of our models' outputs we conduct and leverage the current active research in the area of Explainable AI to deeply understand our models' current behavior, their potential future behavior, and to build capabilities to continuously correct their behavior as and when necessary. We use various explainable AI methods throughout our model development to guide our decisions regarding RAI alignment and other mitigations. Services like Clarify [8] also enable our downstream developers to easily explain model predictions.", + "bbox": [ + 109, + 180, + 883, + 253 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "To work to ensure our models' Robustness against adversarial inputs such as those that attempt to bypass alignment guardrails, we focused on risks applicable to both developers building applications using our models, and users interacting with our models via those applications. We organized those risks in broad categories such as sensitive data exfiltration, execution of unauthorized action, degradation of run-time model service availability, and malicious content generation. We used this risk organization to build model resiliency against interactions that lead to the prioritized risks.", + "bbox": [ + 109, + 256, + 883, + 328 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Finally, to maximize Transparency, we incorporate an invisible watermark during the image or video generation process and add $\\mathrm{C2PA}^9$ metadata in all Canvas generated content. We enhanced the robustness to alterations like rotation, resizing, color inversion, and flipping. 
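The production watermark itself is not public; as a point of contrast, the textbook least-significant-bit scheme sketched below shows the basic idea of an invisible pixel-level payload, although such a naive scheme would not survive the alterations listed above:

import numpy as np

def embed_bits(image: np.ndarray, bits: np.ndarray) -> np.ndarray:
    # Overwrite the least-significant bit of the first bits.size pixel values.
    flat = image.reshape(-1).copy()
    flat[: bits.size] = (flat[: bits.size] & 0xFE) | bits
    return flat.reshape(image.shape)

def extract_bits(image: np.ndarray, n: int) -> np.ndarray:
    return image.reshape(-1)[:n] & 1

# Round-trip check on a random image and a 128-bit payload.
rng = np.random.default_rng(0)
image = rng.integers(0, 256, size=(64, 64, 3), dtype=np.uint8)
payload = rng.integers(0, 2, size=128, dtype=np.uint8)
assert (extract_bits(embed_bits(image, payload), 128) == payload).all()
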
For videos, we embed our watermark in each frame and ensure that our watermarking and detection methods withstand H264 compression. To enable anyone to easily detect the watermarks in Amazon Nova generated content, an API will be available soon after launch. Our watermark detection system introduces several enhancements such as making confidence score-based predictions instead of a single binary prediction that reflects the extent to which the generated content has been edited even when using external tools. The new detection system covers both images and videos.", + "bbox": [ + 109, + 332, + 883, + 444 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "5.3 RAI Evaluation", + "text_level": 1, + "bbox": [ + 112, + 460, + 264, + 474 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Throughout model development we perform extensive RAI evaluations using publicly available benchmarks like BOLD [25], RealToxicityPrompts [31], and MM-SafetyBench [44]. We also built a series of proprietary, dynamically updating benchmarks. To build them, our internal data annotation team created a diverse set of examples for each of our RAI dimensions. In addition, we leveraged subject-matter experts in specific areas, such as Security and Controllability, to collect adversarial prompts. We continued updating and enhancing each dataset based on evaluation and red teaming results (see Section 5.4 for more details on red teaming). This kept the internal benchmarks evergreen, avoiding overfitting during development, but also made sure the models do not regress against previously identified risks. Our datasets comprise inputs in multiple languages and multiple modalities, and contain single-turn and multi-turn conversation examples.", + "bbox": [ + 109, + 484, + 883, + 609 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "5.4 Red Teaming", + "text_level": 1, + "bbox": [ + 112, + 627, + 246, + 642 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Static benchmarks give us a view of how well models perform per RAI dimension against a user's \"plain\" intent (i.e. the prompts explicitly state the intent of the user to generate prohibited content). To test our models' resilience against techniques that mask the users' intent we rely on red teaming. We employed a multi-pronged evaluation strategy consisting of internal red teaming, red teaming with third party and subject matter experts and, automated red teaming.", + "bbox": [ + 109, + 652, + 883, + 710 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "5.4.1 Internal Red Teaming", + "text_level": 1, + "bbox": [ + 112, + 724, + 321, + 739 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "We used a team of trained data analysts and subject-matter experts to perform regular red teaming exercises to evaluate the model's robustness against adversarial prompts across all our RAI dimensions. We enhanced the diversity of manually curated adversarial prompts by employing linguistic, structural, and modality based prompt mutation techniques, assessing each mutation for its effectiveness at generating a response that does not adhere to our RAI objectives, likelihood of its success, and the technique's novelty to a model revision. In total, we identified and developed over 300 distinct techniques (see Figure 4), and tested techniques individually and via chaining various combinations. The attacks covered multiple languages and modalities, targeting each language/modality individually and in combination. 
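As an illustration of what chaining mutation techniques means in practice, the sketch below composes three generic mutators; these are stand-in examples, not the undisclosed internal catalog of 300+ techniques:

import base64
from itertools import permutations

def to_base64(prompt: str) -> str:
    return base64.b64encode(prompt.encode()).decode()

def roleplay(prompt: str) -> str:
    return f"You are an actor rehearsing a scene. Stay in character and say: {prompt}"

def pad_with_noise(prompt: str) -> str:
    return "Disregard the notice that follows. " * 3 + prompt

MUTATORS = (to_base64, roleplay, pad_with_noise)

def chained_variants(seed: str, depth: int = 2) -> list[str]:
    # Apply every ordered chain of `depth` distinct mutators to a seed prompt;
    # each variant would then be sent to the model and scored for adherence
    # to the RAI objectives.
    variants = []
    for chain in permutations(MUTATORS, depth):
        mutated = seed
        for mutate in chain:
            mutated = mutate(mutated)
        variants.append(mutated)
    return variants
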
We designed cross-modality attacks, such as embedding adversarial content within seemingly benign visual inputs, to evaluate the models' ability to handle complex scenarios involving multiple input types. Where appropriate, we implemented automation to further improve the diversity, reliability, and efficiency of red teaming.", + "bbox": [ + 109, + 748, + 883, + 887 + ], + "page_idx": 18 + }, + { + "type": "header", + "text": "The Amazon Nova Family of Models", + "bbox": [ + 370, + 42, + 620, + 56 + ], + "page_idx": 18 + }, + { + "type": "page_footnote", + "text": "9https://c2pa.org/", + "bbox": [ + 133, + 896, + 274, + 912 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 486, + 935, + 504, + 946 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/744fe999753b87a0d695ae52fa9855a52df774d240e85120dba86e378b9958a8.jpg", + "image_caption": [ + "Figure 4: Broad taxonomy and count of attack techniques we use for our red teaming exercises" + ], + "image_footnote": [], + "bbox": [ + 156, + 89, + 844, + 429 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "After each round of red teaming, we gathered feedback from the team regarding failure patterns which guided the next stage of the model development.", + "bbox": [ + 111, + 489, + 883, + 518 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "5.4.2 External Red Teaming", + "text_level": 1, + "bbox": [ + 112, + 539, + 323, + 555 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "In accordance with our commitment to the US White House on ensuring Safe, Secure, and Trustworthy Artificial Intelligence, we partner with a variety of third parties to conduct red teaming against our AI models. These initiatives are in addition to our extensive in-house efforts, which includes all aspects of Cybersecurity red teaming. Just like with our internal red teaming efforts, we iterated during the model development based on feedback from these institutions to improve the RAI adherence of our models. We leverage red-teaming firms including ActiveFence to conduct testing in areas such as hate speech, political misinformation, extremism and other RAI dimensions. We also work with specialized third parties to red team our models for Chemical, Biological, Radiological and Nuclear (CBRN) capabilities. Our work with Deloitte Consulting, tests our AI models' capabilities in Biological risks and harms. Our work with Nemesys Insights LLC tests our AI models' capabilities in the Radiological and Nuclear domains. We also work with the Gomes Group at Carnegie Mellon University to test our models' capabilities in Chemistry and chemical compounds. Each of these partners was carefully selected based on their industry leadership, previous/parallel red teaming work with other AI model developers, and their contributions to evolving government and industry standards around CBRN and overall AI safety. We provide a brief summary of expertise of each of these vendors and their testing methodology below.", + "bbox": [ + 111, + 566, + 883, + 760 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "ActiveFence: ActiveFence is a team of over 150 subject matter experts providing AI Safety and Content Moderation solutions. 
The team produced over 9,700 adversarial prompts, distributed over 20 categories, including content-targeted red teaming (evaluating the model's ability to generate harmful or inappropriate content), and security-targeted red teaming (assessing the model's resilience against malicious attempts to manipulate its behavior or extract sensitive information).", + "bbox": [ + 111, + 766, + 883, + 835 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Deloitte: The evaluation team at Deloitte Consulting LLP (formerly known as Gryphon Scientific) has unique experience at the intersection of artificial intelligence and biology. The primary thrust of this effort involved evaluating the model against a panel of 30 questions developed to test an LLM's scientific knowledge and reasoning capabilities that could facilitate the development or use of biological weapons. The model's responses to these questions were evaluated for their scientific accuracy and utility to someone seeking to do harm with biology. After completing the initial", + "bbox": [ + 111, + 842, + 883, + 912 + ], + "page_idx": 19 + }, + { + "type": "header", + "text": "The Amazon Nova Family of Models", + "bbox": [ + 370, + 42, + 620, + 56 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 485, + 935, + 504, + 946 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "evaluations, the Deloitte team probed more deeply into the questions the LLM originally replied with potentially concerning information.", + "bbox": [ + 109, + 90, + 883, + 119 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Gomes Group: The Gomes Group at Carnegie Mellon University is at the forefront of integrating advanced artificial intelligence into chemical research. Their evaluation framework consisted of both automated and non-automated assessments. Two non-automated evaluations explored aggregation attack vulnerabilities through purchasing and remote chemical mixing scenarios. The automated evaluations utilized two distinct datasets: one containing 39 hazardous chemicals (including DEA Schedule I, II, and chemical warfare agents) and another with 362 common chemicals for NFPA diamond classifications. Three primary automated evaluations were conducted using the hazardous chemicals dataset. The NFPA diamond evaluation comprised 1,810 prompts, testing both single-turn and multi-turn approaches with consistent accuracy across both methods.", + "bbox": [ + 109, + 126, + 883, + 238 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Nemesys: Nemesys Insights LLC run uplift studies, red teaming exercises, and risk assessments for a variety of technology companies and third-party research entities to assess national security related risks of large language models and other generative AI tools. For their testing, they started with human red teaming exercises focused on non-state acquisition or use of illicit radiological/nuclear (RN) materials, followed by prompt-response evaluation and uplift studies. The exercises comprised two different scenarios (a. violent non-state actor acquisition and use of Cobalt-60; b. 
non-state actor acquisition and international transport of HEU [highly enriched uranium]), and utilized 8 subject matter experts with operational and technological knowledge in a 2-team x 2-scenario design to construct and refine threat plans across a 6-hour planning cycle.", + "bbox": [ + 109, + 242, + 883, + 354 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "5.4.3 Automated Red Teaming", + "text_level": 1, + "bbox": [ + 112, + 368, + 341, + 383 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Finally, to augment human based red teaming, we built an automated red teaming mechanism by adapting our (Feedback Loop In-context Red Teaming) FLIRT [52] framework. This approach helped us scale red teaming and repeat red teaming efficiently. FLIRT uses a list of seed prompts that have been identified by human evaluators as potentially violating one or more of our RAI dimensions. For every dimension, a subset of seeds is used to generate additional prompts with a dedicated language model, called red-LM, through in-context-learning (ICL) [18] and a carefully crafted set of instructions. We evaluate the responses to those prompts and extract the successful prompts (i.e., the ones triggering a prohibited response) for the next round of generation. The above steps are repeated for a chosen number of iterations across all RAI categories. We use our automated red teaming mechanism to evaluate both RAI adherence robustness and false refusals. We use the mechanism to generate adversarial tests across multi-turn interactions, multiple languages, and multiple input/output modalities to uncover and correct robustness issues in our models due to potential adversarial content in such interactions and inputs.", + "bbox": [ + 109, + 391, + 883, + 544 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "6 Training Infrastructure", + "text_level": 1, + "bbox": [ + 112, + 563, + 346, + 580 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "The Nova family of models were trained on Amazon's custom Trainium1 (TRN1) chips, $^{10}$ NVidia A100 (P4d instances), and H100 (P5 instances) accelerators. Working with AWS SageMaker, we stood up NVidia GPU and TRN1 clusters and ran parallel trainings to ensure model performance parity, while optimizing training throughput on the different stacks. All clusters utilize petabit-scale non-blocking EFA network fabric which is less prone to packet loss than other network transport protocols $^{11}$ and provides the highest network bandwidth with H100 accelerators compared to any other instance type available on AWS EC2 $^{12}$ . We conducted distributed training on AWS SageMaker-managed Elastic Kubernetes Service (EKS) clusters, and utilized AWS File System X (FSx) and Simple Storage Solution (S3) for data and checkpoint IO. While FSx offers performant and convenient storage for large scale training jobs, S3 allowed cost-efficient scaling to large multimodal datasets and model checkpoints.", + "bbox": [ + 109, + 594, + 883, + 719 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Goodput achieved weekly average values of up to $97\\%$ in pretraining runs through optimizations targeting lower job failure rate, minimizing checkpoint overhead, and overall reduction in the Mean Time to Restart (MTTR). This time is inclusive of time from the last successful checkpoint before training interruption, time taken to restart components of the system and resume training at steady state from checkpoint. 
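As a back-of-envelope illustration of how these terms combine into a goodput figure: the interruption count, lost-work time, and checkpoint cadence below are assumptions for illustration only, while the roughly 1-second checkpoint overhead and 6.5-minute MTTR appear later in this section:

week_seconds = 7 * 24 * 3600
interruptions = 10            # assumed job failures in the week (illustrative)
lost_work = 120.0             # assumed seconds lost since the last checkpoint
mttr = 6.5 * 60               # mean time to restart, average reported below
checkpoint_overhead = 1.0     # ~1 s blocking overhead per save (H100 clusters)
checkpoints = week_seconds // 3600  # assume hourly checkpointing

wasted = interruptions * (lost_work + mttr) + checkpoints * checkpoint_overhead
goodput = (week_seconds - wasted) / week_seconds
print(f"estimated weekly goodput: {goodput:.2%}")  # about 99% with these inputs
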
Techniques such as fully distributed optimizer state and weight sharding and the elimination of all blocking overhead associated with checkpoint persistence resulted in a reduction of checkpointing overhead to $\\sim 1$ sec on H100 clusters, and $\\sim 0.1$ sec on TRN1 clusters. We exceeded our MTTR target of 9 minutes and achieved an average of 6.5 minutes on our TRN1 clusters by optimizing the", + "bbox": [ + 109, + 724, + 883, + 824 + ], + "page_idx": 20 + }, + { + "type": "header", + "text": "The Amazon Nova Family of Models", + "bbox": [ + 370, + 42, + 620, + 56 + ], + "page_idx": 20 + }, + { + "type": "page_footnote", + "text": "$^{10}$ https://aws.amazon.com/blogs/aws/amazon-ec2-trn1-instances-for-high-performance-model-training-g-are-now-available/", + "bbox": [ + 111, + 830, + 883, + 858 + ], + "page_idx": 20 + }, + { + "type": "page_footnote", + "text": "11https://www.amazon.science/publications/a-cloud-optimized-transport-protocol-for-elastic-and-scalable-hpc", + "bbox": [ + 112, + 859, + 880, + 883 + ], + "page_idx": 20 + }, + { + "type": "page_footnote", + "text": "$^{12}$ https://aws.amazon.com/blogs/aws/new-amazon-ec2-p5-instances-powered-by-nvidia-h100-tensor-core-gpus-for-accelerating- generative-ai-and-hpc-applications/", + "bbox": [ + 112, + 883, + 880, + 910 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "21", + "bbox": [ + 485, + 935, + 503, + 946 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "node communication initialization in the training startup process and reduced time to load checkpoints through an asynchronous observer process. This process maps each latest checkpoint file to its corresponding node in the cluster. When resuming from the checkpoint, each node only loads the checkpoint files for its corresponding rank, reducing the time taken to discover the latest checkpoint from 3 minutes to 5 seconds. We also cache and reuse data indices to optimize training data loading initialization time. These improvements reduced data loading initialization to 205ms per restart.", + "bbox": [ + 114, + 90, + 883, + 174 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "To increase training efficiency we developed a new activation checkpointing scheme called Super-Selective Activation Checkpointing (SSC). SSC minimizes activation re-computation in memory-constrained environments, reducing memory consumption by $\\sim 50\\%$ while adding $\\sim 2\\%$ re-computation overhead compared to NVidia's Selective Checkpointing. We also found optimizations in default gradient reduction behavior and the default PyTorch memory allocator behavior. The default gradient reduction behavior leads to suboptimal communication overlap and we found the synchronous nature of the default PyTorch allocation led to stragglers in collectives resulting in multiple stalled workers. We adjusted the gradient reduction order and frequency, allowing us to overlap the majority of data parallelism communication.", + "bbox": [ + 114, + 180, + 883, + 279 + ], + "page_idx": 21 + }, + { + "type": "header", + "text": "The Amazon Nova Family of Models", + "bbox": [ + 370, + 42, + 617, + 56 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "22", + "bbox": [ + 486, + 935, + 504, + 946 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 114, + 89, + 209, + 104 + ], + "page_idx": 22 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Efficient Batch Computing - AWS Batch - AWS, 2024. 
URL https://aws.amazon.com/batch/.", + "[2] Big Data Platform - Amazon EMR - AWS, 2024. URL https://aws.amazon.com/emr/.", + "[3] AgentStudio. Gemini flash. https://computer-agents.github.io/agent-studio/, 2024. Accessed: 2024-11-29.", + "[4] P. Agrawal, S. Antoniak, E. B. Hanna, B. Bout, D. Chaplot, J. Chudnovsky, D. Costa, B. D. Monicault, S. Garg, T. Gervet, S. Ghosh, A. Héliou, P. Jacob, A. Q. Jiang, K. Khandelwal, T. Lacroix, G. Lample, D. L. Casas, T. Lavril, T. L. Scao, A. Lo, W. Marshall, L. Martin, A. Mensch, P. Muddireddy, V. Nemychnikova, M. Pellat, P. V. Platen, N. Raghuraman, B. Rozière, A. Sablayrolles, L. Saulnier, R. Sauvestre, W. Shang, R. Soletskyi, L. Stewart, P. Stock, J. Studnia, S. Subramanian, S. Vaze, T. Wang, and S. Yang. Pixtral 12B, 2024. URL https://arxiv.org/abs/2410.07073.", + "[5] Amazon. Amazon joins Partnership on AI. https://www/aboutamazon.com/news/amazon-ai/amazon-joints-partnership-on-ai, 2016. Accessed: 2024-11-20.", + "[6] Amazon. Our commitment to the responsible use of AI. https://www/aboutamazon.com/news/company-news/amazon-responsible-ai, 2023. Accessed: 2024-11-20.", + "[7] Amazon. Amazon joins US Artificial Intelligence safety institute to advance responsible AI. https://www.abou tamazon.com/news/policy-news-views/amazon-joins-us-artificial-intelligence-safety-i nstitute-to-advance-responsible-ai, 2024. Accessed: 2024-11-20.", + "[8] Amazon. Amazon SageMaker Clarify. https://aws.amazon.com/sagemaker/clarify/, 2024. Accessed: 2024-11-20.", + "[9] Amazon. Data protection & privacy at AWS. https://aws.amazon.com/compliance/data-protection/, 2024. Accessed: 2024-11-20.", + "[10] Amazon. Building AI responsibly at AWS. https://aws.amazon.com/ai/responsible-ai/, 2024. Accessed: 2024-11-20.", + "[11] Anthropic. The Claude 3 model family: Opus, Sonnet, Haiku. Technical report, Anthropic, 2023. URL https://www-cdn.anthropic.com/de8ba9b01c9ab7cbabf5c33b80b7bbc618857627/Model_Card_Claude_3.pdf.", + "[12] Anthropic. Claude Sonnet. https://www.anthropic.com/claude/sonnet, 2024. Accessed: 2024-11-20.", + "[13] Anthropic AI. Claude 3.5 Sonnet model card addendum. Technical report, 2024.", + "[14] Anthropic AI Team. Claude 3.5 Haiku and upgraded Claude 3.5 Sonnet, 2024. URL https://assets.anthropic.com/m/1cd9d098ac3e6467/original/Claude-3-Model-Card-October-Addendum.pdf.", + "[15] S. Arora and B. Barak. Computational complexity: a modern approach. Cambridge University Press, 2009.", + "[16] J. Betker, G. Goh, L. Jing, T. Brooks, J. Wang, L. Li, L. Ouyang, J. Zhuang, J. Lee, Y. Guo, et al. Improving image generation with better captions. Computer Science. https://cdn.openai.com/papers/dall-e-3.pdf, 2(3):8, 2023.", + "[17] Black Forest Labs. Flux models. 2024. URL https://github.com/black-forest-labs/flux.", + "[18] T. B. Brown, B. Mann, N. Ryder, M. Subbiah, J. Kaplan, P. Dhariwal, A. Neelakantan, P. Shyam, G. Sastry, A. Askell, et al. Language models are few-shot learners. arXiv preprint arXiv:2005.14165, 2020.", + "[19] M. Chen, J. Tworek, H. Jun, Q. Yuan, H. P. D. O. Pinto, J. Kaplan, H. Edwards, Y. Burda, N. Joseph, G. Brockman, et al. Evaluating large language models trained on code, 2021.", + "[20] Z. Chen, W. Chen, C. Smiley, S. Shah, I. Borova, D. Langdon, R. N. Moussa, M. I. Beane, T.-H. K. Huang, B. R. Routledge, and W. Y. Wang. FinQA: A dataset of numerical reasoning over financial data. ArXiv, abs/2109.00122, 2021. URL https://api-semanticscholar.org/CorpusID:235399966.", + "[21] J. Cho, A. Zala, and M. Bansal. 
DALL-eval: Probing the reasoning skills and social biases of text-to-image generation models. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 3043-3054, 2023." + ], + "bbox": [ + 112, + 112, + 883, + 910 + ], + "page_idx": 22 + }, + { + "type": "header", + "text": "The Amazon Nova Family of Models", + "bbox": [ + 370, + 42, + 620, + 56 + ], + "page_idx": 22 + }, + { + "type": "page_number", + "text": "23", + "bbox": [ + 485, + 935, + 504, + 946 + ], + "page_idx": 22 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[22] P. Clark, I. Cowhey, O. Etzioni, T. Khot, A. Sabharwal, C. Schoenick, and O. Tafjord. Think you have solved question answering? try ARC, the AI2 reasoning challenge. arXiv:1803.05457v1, 2018.", + "[23] K. Cobbe, V. Kosaraju, M. Bavarian, M. Chen, H. Jun, L. Kaiser, M. Plappert, J. Tworek, J. Hilton, R. Nakano, C. Hesse, and J. Schulman. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168, 2021.", + "[24] X. Deng, Y. Gu, B. Zheng, S. Chen, S. Stevens, B. Wang, H. Sun, and Y. Su. Mind2Web: Towards a generalist agent for the web. In NeurIPS, 2023.", + "[25] J. Dhamala, T. Sun, V. Kumar, S. Krishna, Y. Pruksachatkun, K.-W. Chang, and R. Gupta. BOLD: Dataset and metrics for measuring biases in open-ended language generation. In Proceedings of the 2021 ACM Conference on Fairness, Accountability, and Transparency, FAccT '21, page 862-872, New York, NY, USA, 2021. Association for Computing Machinery. ISBN 9781450383097. doi: 10.1145/3442188.3445924. URL https://doi.org/10.1145/3442188.3445924.", + "[26] D. Dua, Y. Wang, P. Dasigi, G. Stanovsky, S. Singh, and M. Gardner. DROP: A reading comprehension benchmark requiring discrete reasoning over paragraphs. In Proc. of NAACL, 2019.", + "[27] P. Esser, S. Kulal, A. Blattmann, R. Entezari, J. Müller, H. Saini, Y. Levi, D. Lorenz, A. Sauer, F. Boesel, et al. Scaling rectified flow transformers for high-resolution image synthesis. In *Forty-first International Conference on Machine Learning*, 2024. URL https://huggingface.co/stabilityai/stable-diffusion-3-medium.", + "[28] P. Esser, S. Kulal, A. Blattmann, R. Entezari, J. Müller, H. Saini, Y. Levi, D. Lorenz, A. Sauer, F. Boesel, et al. Stable Diffusion 3.5. 2024. URL https://stability.ai/news/introducing-stable-diffusion-3-5.", + "[29] Frontier Model Forum. Amazon and Meta join the Frontier Model Forum to promote AI safety. https://www.frontiermodelforum.org/updates/amazon-and-meta-join-the-frontier-model-forum-t-o-promote-ai-safety/, 2024. Accessed: 2024-11-20.", + "[30] G7 Hiroshima Summit. Hiroshima process international code of conduct for organizations developing advanced AI systems. https://www.mofa.go.jp/files/100573473.pdf, 2023. Accessed: 2024-11-20.", + "[31] S. Gehman, S. Gururangan, M. Sap, Y. Choi, and N. A. Smith. RealToxicityPrompts: Evaluating neural toxic degeneration in language models. In T. Cohn, Y. He, and Y. Liu, editors, Findings of the Association for Computational Linguistics: EMNLP 2020, pages 3356-3369, Online, Nov. 2020. Association for Computational Linguistics. doi: 10.18653/v1/2020.findings-emnlp.301. URL https://aclanthology.org/2020-findings-emnlp.301.", + "[32] Gemini Team. Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context, 2024. URL https://arxiv.org/abs/2403.05530.", + "[33] Google Deepmind. Gemini Flash. https://deepmind.google/technologies/gemini/flash/, 2024. Accessed: 2024-11-20.", + "[34] N. Goyal, C. Gao, V. Chaudhary, P.-J. 
Chen, G. Wenzek, D. Ju, S. Krishnan, M. Ranzato, F. Guzmán, and A. Fan. The FLORES-101 evaluation benchmark for low-resource and multilingual machine translation. 2021.", + "[35] F. Guzmán, P.-J. Chen, M. Ott, J. Pino, G. Lample, P. Koehn, V. Chaudhary, and M. Ranzato. Two new evaluation datasets for low-resource machine translation: Nepali-english and sinhala-english. 2019.", + "[36] D. Hendrycks, C. Burns, S. Basart, A. Zou, M. Mazeika, D. Song, and J. Steinhardt. Measuring massive multitask language understanding. In International Conference on Learning Representations, 2021. URL https://openreview.net/forum?id=d7KBjmI3GmQ.", + "[37] D. Hendrycks, C. Burns, S. Kadavath, A. Arora, S. Basart, E. Tang, D. Song, and J. Steinhardt. Measuring mathematical problem solving with the MATH dataset. NeurIPS, 2021.", + "[38] Y. Hu, B. Liu, J. Kasai, Y. Wang, M. Ostendorf, R. Krishna, and N. A. Smith. TIFA: Accurate and interpretable text-to-image faithfulness evaluation with question answering. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 20406-20417, 2023.", + "[39] R. Islam and O. M. Moushi. GPT-4o: The cutting-edge advancement in multimodal LLM. Technical report, 2024." + ], + "bbox": [ + 112, + 90, + 883, + 912 + ], + "page_idx": 23 + }, + { + "type": "header", + "text": "The Amazon Nova Family of Models", + "bbox": [ + 370, + 42, + 620, + 56 + ], + "page_idx": 23 + }, + { + "type": "page_number", + "text": "24", + "bbox": [ + 485, + 935, + 504, + 946 + ], + "page_idx": 23 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[40] G. Kamradt. LLMTest NeedleInAHaystack, 2023. URL https://github.com/gkamradt/LLMTestNeedleInAHaystack/blob/main/README.md.", + "[41] D. P. Kingma. Auto-encoding variational Bayes. 2nd International Conference on Learning Representations, ICLR, 2014.", + "[42] T.-Y. Lin, M. Maire, S. Belongie, J. Hays, P. Perona, D. Ramanan, P. Dollár, and C. L. Zitnick. Microsoft COCO: Common objects in context. In Computer Vision-ECCV 2014: 13th European Conference, Zurich, Switzerland, September 6-12, 2014, Proceedings, Part V 13, pages 740-755. Springer, 2014.", + "[43] J. Liu, Y. Song, B. Y. Lin, W. Lam, G. Neubig, Y. Li, and X. Yue. VisualWebBench: How far have multimodal llms evolved in web page understanding and grounding?, 2024.", + "[44] X. Liu, Y. Zhu, J. Gu, Y. Lan, C. Yang, and Y. Qiao. MM-SafetyBench: A benchmark for safety evaluation of multimodal large language models. In A. Leonardis, E. Ricci, S. Roth, O. Russakovsky, T. Sattler, and G. Varol, editors, Computer Vision – ECCV 2024, pages 386–403, Cham, 2025. Springer Nature Switzerland. ISBN 978-3-031-72992-8.", + "[45] Llama Team, AI Meta. The Llama 3 herd of models, 2024. URL https://github.com/meta-llama/llama-models/blob/main/models/llama3_1/MODEL_CARD.md.", + "[46] P. Lu, B. Peng, H. Cheng, M. Galley, K.-W. Chang, Y. N. Wu, S.-C. Zhu, and J. Gao. Chameleon: Plug-and-play compositional reasoning with large language models. In The 37th Conference on Neural Information Processing Systems (NeurIPS), 2023.", + "[47] Luma Labs, 2024. URL https://lumalabs.ai/dream-machine.", + "[48] L. Madaan, A. K. Singh, R. Schaeffer, A. Poulton, S. Koyejo, P. Stenetorp, S. Narang, and D. Hupkes. Quantifying variance in evaluation benchmarks, 2024. URL https://arxiv.org/abs/2406.10229.", + "[49] K. Mangalam, R. Akshulakov, and J. Malik. EgoSchema: A diagnostic benchmark for very long-form video language understanding. In NeurIPS, 2023.", + "[50] A. Masry, D. X. Long, J. Q. Tan, S. 
Joty, and E. Hoque. ChartQA: A benchmark for question answering about charts with visual and logical reasoning. In ACL Findings, 2022.", + "[51] M. Mathew, D. Karatzas, and C. Jawahar. DocVQA: A dataset for VQA on document images. In WACV, 2021.", + "[52] N. Mehrabi, P. Goyal, C. Dupuy, Q. Hu, S. Ghosh, R. Zemel, K.-W. Chang, A. Galstyan, and R. Gupta. FLIRT: Feedback loop in-context red teaming. In EMNLP 2024, 2024. URL https://www.amazon.science/publications/flirt-feedback-loop-in-context-red-teaming.", + "[53] Meta. Llama 3.2 Github model card vision. https://github.com/meta-llama/llama-models/blob/main/models/llama3_2/MODEL_CARD_VISION.md#instruction-tuned-models, 2024. Accessed: 2024-11-20.", + "[54] Y. Onoe, S. Rane, Z. Berger, Y. Bitton, J. Cho, R. Garg, A. Ku, Z. Parekh, J. Pont-Tuset, G. Tanzer, et al. DOCCI: Descriptions of connected and contrasting images. URL https://arxiv.org/abs/2404.19753.", + "[55] OpenAI. GPT 4o mini. https://openai.com/index/gpt-4o-mini-advancing-cost-efficient-intelligence, 2024. Accessed: 2024-11-20.", + "[56] OpenAI. Hello GPT 4o. https://openai.com/index/hello-gpt-4o, 2024. Accessed: 2024-11-20.", + "[57] OpenAI Team. simple evals GPT4, 2024. URL https://github.com/openai/simple-evals.", + "[58] OpenAI Team. o1 mini system card, 2024. URL https://cdn.openai.com/o1-system-card-20240917.pdf.", + "[59] L. Ouyang, J. Wu, X. Jiang, D. Almeida, C. Wainwright, P. Mishkin, C. Zhang, S. Agarwal, K. Slama, A. Ray, J. Schulman, J. Hilton, F. Kelton, L. Miller, M. Simens, A. Askell, P. Welinder, P. F. Christiano, J. Leike, and R. Lowe. Training language models to follow instructions with human feedback. In Advances in Neural Information Processing Systems, volume 35, pages 27730-27744, 2022." + ], + "bbox": [ + 112, + 90, + 883, + 912 + ], + "page_idx": 24 + }, + { + "type": "header", + "text": "The Amazon Nova Family of Models", + "bbox": [ + 370, + 42, + 620, + 56 + ], + "page_idx": 24 + }, + { + "type": "page_number", + "text": "25", + "bbox": [ + 485, + 935, + 504, + 946 + ], + "page_idx": 24 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[60] S. G. Patil, T. Zhang, X. Wang, and J. E. Gonzalez. Gorilla: Large language model connected with massive APIs, 2023. URL https://arxiv.org/abs/2305.15334.", + "[61] W. Peebles and S. Xie. Scalable diffusion models with transformers. In ICCV, 2023.", + "[62] R. Rafailov, A. Sharma, E. Mitchell, C. D. Manning, S. Ermon, and C. Finn. Direct preference optimization: Your language model is secretly a reward model. In Thirty-seventh Conference on Neural Information Processing Systems, 2023.", + "[63] R. Rei, J. G. C. de Souza, D. Alves, C. Zerva, A. C. Farinha, T. Glushkova, A. Lavie, L. Coheur, and A. F. T. Martins. COMET-22: Unbabel-IST 2022 submission for the metrics shared task. In P. Koehn, L. Barrault, O. Bojar, F. Bougares, R. Chatterjee, M. R. Costa-jussa, C. Federmann, M. Fishel, A. Fraser, M. Freitag, Y. Graham, R. Grundkiewicz, P. Guzman, B. Haddow, M. Huck, A. Jimeno Yepes, T. Kocmi, A. Martins, M. Morishita, C. Monz, M. Nagata, T. Nakazawa, M. Negri, A. Néveol, M. Neves, M. Popel, M. Turchi, and M. Zampieri, editors, Proceedings of the Seventh Conference on Machine Translation (WMT), pages 578–585, Abu Dhabi, United Arab Emirates (Hybrid), Dec. 2022. Association for Computational Linguistics. URL https://aclanthology.org/2022.wmt-1.52.", + "[64] D. Rein, B. L. Hou, A. C. Stickland, J. Petty, R. Y. Pang, J. Dirani, J. Michael, and S. R. Bowman. 
GPQA: A graduate-level google-proof Q&A benchmark, 2023. URL https://arxiv.org/abs/2311.12022.", + "[65] Runway Research, 2024. URL https://runwayml.com/research/introducing-gen-3-alpha.", + "[66] C. Saharia, W. Chan, S. Saxena, L. Li, J. Whang, E. L. Denton, K. Ghasemipour, R. Gontijo Lopes, B. Karagol Ayan, T. Salimans, et al. Photorealistic text-to-image diffusion models with deep language understanding. Advances in neural information processing systems, 35:36479-36494, 2022.", + "[67] T. Schick, J. Dwivedi-Yu, R. Dessi, R. Raileanu, M. Lomeli, E. Hambro, L. Zettlemoyer, N. Cancedda, and T. Scialom. Toolformer: Language models can teach themselves to use tools. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. URL https://openreview.net/forum?id=Yacmpz84TH.", + "[68] J. Schulman, F. Wolski, P. Dhariwal, A. Radford, and O. Klimov. Proximal policy optimization algorithms, 2017.", + "[69] U. Shaham, M. Ivgi, A. Efrat, J. Berant, and O. Levy. ZeroSCROLLS: A zero-shot benchmark for long text understanding. In H. Bouamor, J. Pino, and K. Bali, editors, Findings of the Association for Computational Linguistics: EMNLP 2023, pages 7977-7989, Singapore, Dec. 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023-findings-emnlp.536. URL https://aclanthology.org/2023-findings-emnlp.536.", + "[70] A. Singh, V. Natarajan, M. Shah, Y. Jiang, X. Chen, D. Batra, D. Parikh, and M. Rohrbach. Towards VQA models that can read. In CVPR, 2019.", + "[71] K. Sun, K. Huang, X. Liu, Y. Wu, Z. Xu, Z. Li, and X. Liu. T2V-CompBench: A comprehensive benchmark for compositional text-to-video generation. arXiv preprint arXiv:2407.14505, 2024.", + "[72] M. Suzgun, N. Scales, N. Scharli, S. Gehrmann, Y. Tay, H. W. Chung, A. Chowdhery, Q. V. Le, E. H. Chi, D. Zhou, , and J. Wei. Challenging BIG-Bench tasks and whether chain-of-thought can solve them. arXiv preprint arXiv:2210.09261, 2022.", + "[73] N. Team, M. R. Costa-jussa, J. Cross, O. Celebi, M. Elbayad, K. Heafield, K. Heffernan, E. Kalbassi, J. Lam, D. Licht, J. Maillard, A. Sun, S. Wang, G. Wenzek, A. Youngblood, B. Akula, L. Barrault, G. M. Gonzalez, P. Hansanti, J. Hoffman, S. Jarrett, K. R. Sadagopan, D. Rowe, S. Spruit, C. Tran, P. Andrews, N. F. Ayan, S. Bhosale, S. Edunov, A. Fan, C. Gao, V. Goswami, F. Guzmán, P. Koehn, A. Mourachko, C. Ropers, S. Saleem, H. Schwenk, and J. Wang. No language left behind: Scaling human-centered machine translation. 2022.", + "[74] A. Vaswani, N. Shazeer, N. Parmar, J. Uszkoreit, L. Jones, A. N. Gomez, L. Kaiser, and I. Polosukhin. Attention is all you need, 2023. URL https://arxiv.org/abs/1706.03762.", + "[75] R. Vedantam, C. L. Zitnick, and D. Parikh. CIDEr: Consensus-based Image Description Evaluation. In CVPR, 2015.", + "[76] A. Wang, R. Y. Pang, A. Chen, J. Phang, and S. R. Bowman. SQuALITY: Building a long-document summarization dataset the hard way. In Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pages 1139–1156, Abu Dhabi, United Arab Emirates, Dec. 2022. Association for Computational Linguistics. URL https://aclanthology.org/2022.emnlp-main.75." + ], + "bbox": [ + 112, + 90, + 883, + 912 + ], + "page_idx": 25 + }, + { + "type": "header", + "text": "The Amazon Nova Family of Models", + "bbox": [ + 370, + 42, + 620, + 56 + ], + "page_idx": 25 + }, + { + "type": "page_number", + "text": "26", + "bbox": [ + 485, + 935, + 504, + 946 + ], + "page_idx": 25 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[77] W. 
Wang, Z. He, W. Hong, Y. Cheng, X. Zhang, J. Qi, X. Gu, S. Huang, B. Xu, Y. Dong, et al. LVBench: An extreme long video understanding benchmark. arXiv preprint arXiv:2406.08035, 2024.", + "[78] X. Wang, J. Wu, J. Chen, L. Li, Y.-F. Wang, and W. Y. Wang. VATEX: A large-scale, high-quality multilingual dataset for video-and-language research. In ICCV, 2019.", + "[79] J. Wei, X. Wang, D. Schuurmans, M. Bosma, B. Ichter, F. Xia, E. H. Chi, Q. V. Le, and D. Zhou. Chain-of-thought prompting elicits reasoning in large language models. In Proceedings of the 36th International Conference on Neural Information Processing Systems, NIPS '22, Red Hook, NY, USA, 2024. Curran Associates Inc. ISBN 9781713871088.", + "[80] J. Xu, X. Liu, Y. Wu, Y. Tong, Q. Li, M. Ding, J. Tang, and Y. Dong. ImageReward: Learning and evaluating human preferences for text-to-image generation. Advances in Neural Information Processing Systems, 36, 2024.", + "[81] F. Yan, H. Mao, C. C.-J. Ji, T. Zhang, S. G. Patil, I. Stoica, and J. E. Gonzalez. Berkeley function calling leaderboard. 2024.", + "[82] X. Yang, K. Sun, H. Xin, Y. Sun, N. Bhalla, X. Chen, S. Choudhary, R. D. Gui, Z. W. Jiang, Z. Jiang, L. Kong, B. Moran, J. Wang, Y. E. Xu, A. Yan, C. Yang, E. Yuan, H. Zha, N. Tang, L. Chen, N. Scheffer, Y. Liu, N. Shah, R. Wanga, A. Kumar, W. tau Yih, and X. L. Dong. Crag – comprehensive rag benchmark. arXiv preprint arXiv:2406.04744, 2024. URL https://arxiv.org/abs/2406.04744.", + "[83] S. Yao, J. Zhao, D. Yu, N. Du, I. Shafran, K. Narasimhan, and Y. Cao. ReAct: Synergizing reasoning and acting in language models. In International Conference on Learning Representations (ICLR), 2023.", + "[84] J. Yu, Y. Xu, J. Y. Koh, T. Luong, G. Baid, Z. Wang, V. Vasudevan, A. Ku, Y. Yang, B. K. Ayan, et al. Scaling autoregressive models for content-rich text-to-image generation. arXiv preprint arXiv:2206.10789, 2(3):5, 2022.", + "[85] X. Yue, Y. Ni, K. Zhang, T. Zheng, R. Liu, G. Zhang, S. Stevens, D. Jiang, W. Ren, Y. Sun, C. Wei, B. Yu, R. Yuan, R. Sun, M. Yin, B. Zheng, Z. Yang, Y. Liu, W. Huang, H. Sun, Y. Su, and W. Chen. MMMU: A massive multi-discipline multimodal understanding and reasoning benchmark for expert agi. In CVPR, 2024.", + "[86] B. Zheng, B. Gou, J. Kil, H. Sun, and Y. Su. GPT-4V(ison) is a generalist web agent, if grounded. In ICML, 2024.", + "[87] L. Zheng, Z. Huang, Z. Xue, X. Wang, B. An, and S. Yan. AgentStudio: A toolkit for building general virtual agents. arXiv preprint arXiv:2403.17918, 2024.", + "[88] M. Zhong, A. Zhang, X. Wang, R. Hou, W. Xiong, C. Zhu, Z. Chen, L. Tan, C. Bi, M. Lewis, S. Popuri, S. Narang, M. Kambadur, D. Mahajan, S. Edunov, J. Han, and L. van der Maaten. Law of the weakest link: Cross capabilities of large language models. arXiv preprint arXiv:2409.19951, 2024.", + "[89] J. Zhou, T. Lu, S. Mishra, S. Brahma, S. Basu, Y. Luan, D. Zhou, and L. Hou. Instruction-following evaluation for large language models, 2023. URL https://arxiv.org/abs/2311.07911." 
+ ], + "bbox": [ + 112, + 90, + 883, + 640 + ], + "page_idx": 26 + }, + { + "type": "header", + "text": "The Amazon Nova Family of Models", + "bbox": [ + 370, + 42, + 620, + 56 + ], + "page_idx": 26 + }, + { + "type": "page_number", + "text": "27", + "bbox": [ + 485, + 934, + 504, + 946 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "A Amazon Nova Canvas Capabilities", + "text_level": 1, + "bbox": [ + 112, + 89, + 441, + 107 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Our Nova Canvas model offers the following functionalities, with examples given in Figure 5.", + "bbox": [ + 112, + 119, + 730, + 135 + ], + "page_idx": 27 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Text-to-image generation allows customers to create images at various resolutions (from $512 \\times 512$ up to $2\\mathrm{K} \\times 2\\mathrm{K}$).", + "- Editing allows developers to edit images using a text prompt, a mask image, or both. Amazon Nova Canvas supports text-to-image editing and image-to-image editing, including inpainting, outpainting, and object removal.", + "- Image variation allows customers to generate images that preserve the content of user-provided images while introducing variations.", + "- Image conditioning allows customers to provide a reference image along with a text prompt, resulting in outputs that follow the layout and structure of the user-supplied reference.", + "- Image guidance with color palette allows customers to precisely control the color palette of generated images by providing a list of hex codes along with the text prompt.", + "- Background removal automatically removes the background from images containing multiple objects." + ], + "bbox": [ + 156, + 145, + 880, + 337 + ], + "page_idx": 27 + }, + { + "type": "header", + "text": "The Amazon Nova Family of Models", + "bbox": [ + 370, + 42, + 620, + 56 + ], + "page_idx": 27 + }, + { + "type": "page_number", + "text": "28", + "bbox": [ + 486, + 935, + 504, + 946 + ], + "page_idx": 27 + }, + { + "type": "image", + "img_path": "images/76e139eb48ebbe67c6ae23af9d987841e615fe510fc47192d81ab31c91976ad0.jpg", + "image_caption": [ + "A dinosaur sitting in a tea cup" + ], + "image_footnote": [], + "bbox": [ + 308, + 157, + 467, + 280 + ], + "page_idx": 28 + }, + { + "type": "image", + "img_path": "images/b6eeeaf2872be27c90e24802e500673f2edd6c10373e7d61cccf86e50079d449.jpg", + "image_caption": [ + "(b) Inpainting the image with swans" + ], + "image_footnote": [], + "bbox": [ + 500, + 209, + 656, + 279 + ], + "page_idx": 28 + }, + { + "type": "image", + "img_path": "images/a0f9f9877678b8c10c66531b68e095c59e1aa8c5547165a942d51f5903c8cfd2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 658, + 209, + 815, + 279 + ], + "page_idx": 28 + }, + { + "type": "image", + "img_path": "images/f067e42572ed86ce39b690f5084a986840fe5c7a607cd71a992dbb2c6eca7bd7.jpg", + "image_caption": [ + "(a) Image generation from a text prompt" + ], + "image_footnote": [], + "bbox": [ + 151, + 313, + 308, + 436 + ], + "page_idx": 28 + }, + { + "type": "image", + "img_path": "images/c5cb0ef9a0a9732d58e0989d6f1191010a2c9f768a1103091663080bd660e849.jpg", + "image_caption": [ + "change flowers to orange color" + ], + "image_footnote": [], + "bbox": [ + 310, + 325, + 467, + 448 + ], + "page_idx": 28 + }, + { + "type": "image", + "img_path": "images/bd620d782bd91e54f3c9b6c828c79489b562e1ef53f3d5fefaf88840c93693bb.jpg", + "image_caption": [ + "(d) Outpainting a new background" + ], + "image_footnote": [], + "bbox": [ + 500, + 337,
+ 656, + 459 + ], + "page_idx": 28 + }, + { + "type": "image", + "img_path": "images/9adcd93c3a348a977378541a3eb9005b49c67785526792f626c35ec61a8e75a6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 658, + 337, + 815, + 459 + ], + "page_idx": 28 + }, + { + "type": "image", + "img_path": "images/797ec36ddec755043f50d9c4c3a8db04f6feb57495ebcfb57ee80e75e8722356.jpg", + "image_caption": [ + "(c) Image editing" + ], + "image_footnote": [], + "bbox": [ + 151, + 494, + 308, + 616 + ], + "page_idx": 28 + }, + { + "type": "image", + "img_path": "images/baa82e589e4f25edd6757b4df2d5b65509e507cf1609604b25a0fb5bbd27d127.jpg", + "image_caption": [ + "a hamster eats apple slice" + ], + "image_footnote": [], + "bbox": [ + 310, + 507, + 467, + 628 + ], + "page_idx": 28 + }, + { + "type": "image", + "img_path": "images/249480146ec018334d61af0070a064c6e4def2de70d75b161a1be51e5c5abf24.jpg", + "image_caption": [ + "A wooden boat in summer" + ], + "image_footnote": [], + "bbox": [ + 500, + 494, + 656, + 616 + ], + "page_idx": 28 + }, + { + "type": "image", + "img_path": "images/f1232a213ee459c8d311a66e6cfa0b449d50930b4ecc8696660599b3d68a90b4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 658, + 507, + 815, + 628 + ], + "page_idx": 28 + }, + { + "type": "image", + "img_path": "images/21668492523acdf3cb014570bfb648a5eb6d1b6bd74d14386e769c83ee12b86c.jpg", + "image_caption": [ + "A jar of salad dressing in a rustic kitchen surrounded by fresh vegetables with studio lighting" + ], + "image_footnote": [], + "bbox": [ + 156, + 712, + 302, + 741 + ], + "page_idx": 28 + }, + { + "type": "image", + "img_path": "images/50ddd1488f51b07c178b9608ddaee633be7d742f40fcd2b07ebd87617516fae9.jpg", + "image_caption": [ + "(e) Style transfer", + "(g) Controlling the color palette", + "Figure 5: Example capabilities of Amazon Nova Canvas, our content generation model for images." + ], + "image_footnote": [], + "bbox": [ + 310, + 676, + 467, + 797 + ], + "page_idx": 28 + }, + { + "type": "image", + "img_path": "images/a676fc75d5145c13756d1582514a2a8d24bb66faa47989ebc9fadd33faf862de.jpg", + "image_caption": [ + "(f) Guided generation", + "(h) Background Removal" + ], + "image_footnote": [], + "bbox": [ + 500, + 675, + 656, + 797 + ], + "page_idx": 28 + }, + { + "type": "image", + "img_path": "images/1c1166d99bcb5b4b726864c65e11014aa1fe34f34def76dd67e8b8644c2f3a38.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 666, + 710, + 800, + 780 + ], + "page_idx": 28 + }, + { + "type": "header", + "text": "The Amazon Nova Family of Models", + "bbox": [ + 370, + 42, + 620, + 56 + ], + "page_idx": 28 + }, + { + "type": "page_number", + "text": "29", + "bbox": [ + 486, + 935, + 504, + 946 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "B Prompts and Scoring", + "text_level": 1, + "bbox": [ + 112, + 89, + 328, + 108 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Prompt templates used for Amazon Nova evaluations are given below, along with those used for select other public models where noted. 
Additional materials and evaluation results from this report can be found at:", + "bbox": [ + 111, + 119, + 885, + 148 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "https://huggingface.co/amazon-agi", + "bbox": [ + 352, + 161, + 642, + 176 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "B.1 Text evaluation", + "text_level": 1, + "bbox": [ + 112, + 191, + 264, + 205 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "B.1.1 Language Understanding", + "text_level": 1, + "bbox": [ + 112, + 217, + 346, + 233 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "For MMLU:", + "bbox": [ + 112, + 239, + 200, + 253 + ], + "page_idx": 29 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "What is the correct answer to this question: \nChoices: . Let's think step by step: \nBased on the above, what is the single, most likely answer choice? Answer in the format \"The correct answer is (insert answer here).\"", + "guess_lang": "txt", + "bbox": [ + 112, + 263, + 879, + 320 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "For ARC-C:", + "bbox": [ + 112, + 335, + 199, + 349 + ], + "page_idx": 29 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "Given the following question and four candidate answers (A, B, C and D), choose the best answer. \nQuestion: \nYour response should end with \"The best answer is [the_answer_letter]\" where the [the_answer_letter] is one of A, B, C or D.", + "guess_lang": "txt", + "bbox": [ + 112, + 359, + 872, + 431 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "For DROP:", + "bbox": [ + 112, + 446, + 191, + 460 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "We use the following 6 shots:", + "bbox": [ + 112, + 460, + 308, + 474 + ], + "page_idx": 29 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "- answer: >- According to the passage, the European Coal and Steel Community was established in 1951 and became the EEC in 1958. 1958 - 1951 = 7. So the answer is 7 \npassage: >- Since the 1970s, U.S. governments have negotiated managed-trade agreements, such as the North American Free Trade Agreement in the 1990s, the Dominican Republic-Central America Free Trade Agreement in 2006, and a number of bilateral agreements. In Europe, six countries formed the European Coal and Steel Community in 1951 which became the European Economic Community in 1958. Two core objectives of the EEC were the development of a common market, subsequently renamed the single market, and establishing a customs union between its member states. \nquestion: How many years did the European Coal and Steel Community exist? \n- answer: >- According to the passage, 23.5% ages 18 to 24. 23.5% \npassage: >- In the county, the population was spread out with 23.50% 18, 8.70% 13.30% \nquestion: >- How many more percent are under the age of 18 compared to the 18 to 24 group? \n- answer: >- According to the passage, Stafford threw 5 TD passes, 3 of which were to Johnson. 5 - 3 = 2. So the answer is 2 \npassage: >- Playing in their second straight Thanksgiving game, the Eagles struggled especially on defense, where they were unable to stop the much-hyped Lions offense. The worst of it all was how unproven rookie Eric Rowe was tasked", + "guess_lang": "txt", + "bbox": [ + 127, + 484, + 800, + 912 + ], + "page_idx": 29 + }, + { + "type": "header", + "text": "The Amazon Nova Family of Models", + "bbox": [ + 370, + 42, + 620, + 56 + ], + "page_idx": 29 + }, + { + "type": "page_number", + "text": "30", + "bbox": [ + 485, + 935, + 504, + 946 + ], + "page_idx": 29 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "with covering wide receiver Calvin Johnson, leading to Johnson catching 3 \ntouchdowns. Stafford's five passing touchdowns, including three of them to \nJohnson was too much for the Eagles to overcome and for the second \nconsecutive time this season, the Eagles gave up 45 points in a game. With \nthe loss, the Eagles drop to 4-7 on the season and 6-1 when playing on \nThanksgiving. \nquestion: How many TD passes did Stafford throw other than to Johnson? \n- answer: > All the touchdown runs are: a 27-yard touchdown run, a 9-yard touchdown run, a 11-yard touchdown run. The smallest number among 27, 9, 11 is 9. So the shortest touchdown run was 9 yards. All the touchdown passes are: a 12-yard touchdown pass. So the longest touchdown pass was 12 yards. So the shortest touchdown run and the longest touchdown pass combine for 9 + 12 = 21 yards. So the answer is 21 \npassage: > The Seahawks played the San Francisco 49ers. In the first quarter, the Hawks RB Julius Jones got a 27-yard TD run, along with DT Craig Terrill returning a fumble 9 yards for a touchdown. In the third quarter, the 49ers almost rallied as RB H. J. Torres made a 12-yard TD pass to Lucas Nelly, along with Mare kicking a 32-yard field goal. In the final quarter, Julius Jones got another 11-yard TD. \nquestion: > How many yards do the shortest touchdown run and the longest touchdown pass combine for? \n- answer: > The Ravens kicker Billy Cundiff got a 45-yard field goal in the second quarter, concluding the first half with a 10-7 lead. So the Ravens had 10 points at halftime. So the answer is 10 \npassage: > The Steelers went home for a duel with the Baltimore Ravens. Pittsburgh would deliver the opening punch in the first quarter with a 1-yard touchdown from running back Rashard Mendenhall. The Ravens would make it even as running back Willis McGahee got a 9-yard TD. The Ravens kicker Billy Cundiff got a 45-yard field goal in the second quarter, concluding the first half with a 10-7 lead. The Steelers brought the game into overtime with a 38-yard field goal by Andrew Foster. The Ravens Billy Cundiff pulled off a winning 33-yard field goal in overtime. \nquestion: How many points did the Ravens have at halftime? \n- answer: > The first and third quarters were the scoreless quarters. So there are 2 scoreless quarters. So the answer is 2 \npassage: > The Vikings flew to Bank of America Stadium to face the Carolina Panthers. After a scoreless first quarter, Carolina got on the board with quarterback Matt Moore finding fullback Brad Hoover on a 1-yard TD pass. After yet another scoreless quarter, Carolina sealed the game as Matt Moore completed a 42-yard touchdown pass to wide receiver Steve Smith. \nquestion: How many scoreless quarters were there?", + "guess_lang": "txt", + "bbox": [ + 109, + 87, + 888, + 760 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "For each shot we provide the following instruction:", + "bbox": [ + 112, + 771, + 450, + 787 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Conclude your answer with: \"So the answer is {final answer}\". Make sure the final answer is in plain text format", + "bbox": [ + 112, + 794, + 870, + 824 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "And we create each user prompt as follows:", + "bbox": [ + 112, + 842, + 403, + 857 + ], + "page_idx": 30 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": " ", + "guess_lang": "xml", + "bbox": [ + 112, + 864, + 230, + 906 + ], + "page_idx": 30 + }, + { + "type": "header", + "text": "The Amazon Nova Family of Models", + "bbox": [ + 370, + 42, + 620, + 56 + ], + "page_idx": 30 + }, + { + "type": "page_number", + "text": "31", + "bbox": [ + 486, + 935, + 503, + 946 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "For IFEval:", + "bbox": [ + 112, + 90, + 192, + 104 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "No particular prompt was added (the query was input to the model directly).", + "bbox": [ + 112, + 104, + 555, + 119 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "For BBH:", + "bbox": [ + 112, + 125, + 181, + 138 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "We use a preamble that describes the task, for example:", + "bbox": [ + 112, + 138, + 477, + 154 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Evaluate the result of a random Boolean expression.", + "bbox": [ + 112, + 161, + 550, + 176 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "We then provide few-shot examples in the following format:", + "bbox": [ + 112, + 191, + 508, + 208 + ], + "page_idx": 31 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "<preamble> \nQuestion: \n \nLet's think step by step. \n. So the answer is ", + "guess_lang": "txt", + "bbox": [ + 112, + 213, + 612, + 284 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "And we follow this by the query:", + "bbox": [ + 112, + 299, + 330, + 315 + ], + "page_idx": 31 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "<preamble> \nQuestion: \n \nLet's think step by step.", + "guess_lang": "txt", + "bbox": [ + 112, + 321, + 328, + 377 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "For each subject, we provide the subject-specific instructions as below:", + "bbox": [ + 112, + 393, + 581, + 407 + ], + "page_idx": 31 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "- subject: boolean_expressions\n instruction: Conclude your answer with: \"So the answer is True or False.\"\n- subject: causal_judgement\n instruction: Conclude your answer with: \"So the answer is Yes or No.\"\n- subject: date_understanding\n instruction: Conclude your answer with: \"So the answer is (answer_letter).\" . Where answer_letter is A, or B, or ...\n- subject: disambiguation_qa\n instruction: Conclude your answer with: \"So the answer is (answer_letter).\" . Where answer_letter is A, or B, or ...\n- subject: dyck_languages\n instruction: Correctly close a Dyck-n word. Conclude your answer with: \"So the answer is {final answer}.\". 
Make sure the final answer is in plain text format\n- subject: formal_fallacies\n instruction: Conclude your answer with: \"So the answer is valid or invalid.\"\n- subject: geometric_shapes\n instruction: Conclude your answer with: \"So the answer is (answer_letter).\" . Where answer_letter is A, or B, or ...\n- subject: hyperbaton\n instruction: Conclude your answer with: \"So the answer is (answer_letter).\" . Where answer_letter is A, or B, or ...\n- subject: logical_deduction_five_objects\n instruction: Conclude your answer with: \"So the answer is (answer_letter).\" . Where answer_letter is A, or B, or ...\n- subject: logical_deduction_seven_objects\n instruction: Conclude your answer with: \"So the answer is (answer_letter).\" . Where answer_letter is A, or B, or ...\n- subject: logical_deduction_three_objects\n instruction: Conclude your answer with: \"So the answer is (answer_letter).\" . Where answer_letter is A, or B, or ...\n- subject: movie_recommendation\n instruction: Conclude your answer with: \"So the answer is (answer_letter).\" . Where answer_letter is A, or B, or ...\n- subject: multistep_arithmetic_two\n instruction: Conclude your answer with: \"So the answer is {final answer}.\". Make sure the final answer is in plain text format", + "guess_lang": "yaml", + "bbox": [ + 112, + 415, + 859, + 912 + ], + "page_idx": 31 + }, + { + "type": "header", + "text": "The Amazon Nova Family of Models", + "bbox": [ + 370, + 42, + 620, + 56 + ], + "page_idx": 31 + }, + { + "type": "page_number", + "text": "32", + "bbox": [ + 486, + 935, + 504, + 946 + ], + "page_idx": 31 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "- subject: navigate\n instruction: Conclude your answer with: \"So the answer is Yes or No\".\n- subject: object_counting\n instruction: Conclude your answer with: \"So the answer is {final answer}.\". Where {final answer} is an integer\n- subject: penguins_in_a_table\n instruction: Conclude your answer with: \"So the answer is (answer_letter)\". Where answer_letter is A, or B, or ...\n- subject: reasoning_about_colored_objects\n instruction: Conclude your answer with: \"So the answer is (answer_letter)\". Where answer_letter is A, or B, or ...\n- subject: ruin_names\n instruction: Conclude your answer with: \"So the answer is (answer_letter)\". Where answer_letter is A, or B, or ...\n- subject: salient_translation_error_detection\n instruction: Conclude your answer with: \"So the answer is (answer_letter)\". Where answer_letter is A, or B, or ...\n- subject: snarks\n instruction: Conclude your answer with: \"So the answer is (answer_letter)\". Where answer_letter is A, or B, or ...\n- subject: sports_understanding\n instruction: Conclude your answer with: \"So the answer is yes or no\".\n- subject: temporal_sequences\n instruction: Conclude your answer with: \"So the answer is (answer_letter)\". Where answer_letter is A, or B, or ...\n- subject: tracking_shuffled_objects_five_objects\n instruction: Conclude your answer with: \"So the answer is (answer_letter)\". Where answer_letter is A, or B, or ...\n- subject: tracking_shuffled_objects_seven_objects\n instruction: Conclude your answer with: \"So the answer is (answer_letter)\". Where answer_letter is A, or B, or ...\n- subject: tracking_shuffled_objects_three_objects\n instruction: Conclude your answer with: \"So the answer is (answer_letter)\". Where answer_letter is A, or B, or ...\n- subject: web_of_lies\n instruction: Conclude your answer with: \"So the answer is Yes or No\".\n- subject: word_sorting\n instruction: Conclude your answer with: \"So the answer is word_1 word_2 ... word_n\".", + "guess_lang": "yaml", + "bbox": [ + 112, + 92, + 883, + 619 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "For GPQA:", + "text_level": 1, + "bbox": [ + 112, + 638, + 192, + 652 + ], + "page_idx": 32 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "What is the correct answer to this question: \nChoices: . Let's think step by step: \nBased on the above, what is the single, most likely answer choice? Answer in the format \"The correct answer is (insert answer here).\"", + "guess_lang": "txt", + "bbox": [ + 109, + 667, + 880, + 726 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "B.1.2 Mathematical Reasoning", + "text_level": 1, + "bbox": [ + 112, + 763, + 344, + 779 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "For MATH, GSM8K:", + "bbox": [ + 112, + 789, + 258, + 804 + ], + "page_idx": 32 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "Solve the following math problem step by step. Remember to put your answer inside \\boxed{}", + "guess_lang": "txt", + "bbox": [ + 112, + 819, + 509, + 890 + ], + "page_idx": 32 + }, + { + "type": "header", + "text": "The Amazon Nova Family of Models", + "bbox": [ + 370, + 42, + 620, + 56 + ], + "page_idx": 32 + }, + { + "type": "page_number", + "text": "33", + "bbox": [ + 486, + 935, + 504, + 946 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "B.1.3 Translation", + "text_level": 1, + "bbox": [ + 112, + 90, + 250, + 104 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "For Flores:", + "bbox": [ + 112, + 114, + 189, + 127 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Nova and Llama:", + "bbox": [ + 112, + 128, + 235, + 142 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Translate the following text into {tgt-lang}. Please output only the translated text with no prefix or introduction: {src}", + "bbox": [ + 112, + 151, + 879, + 181 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Gemini and GPT:", + "bbox": [ + 112, + 198, + 233, + 212 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Your job is to translate a sentence from {src-lang} into {tgt-lang}. Please output ONLY the translation and nothing else: {src}", + "bbox": [ + 112, + 220, + 861, + 251 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "B.1.4 Long Context", + "text_level": 1, + "bbox": [ + 112, + 277, + 264, + 292 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "For SQuALITY (ZeroScrolls Benchmark), we use the standard prompt template for Amazon Nova and Gemini models as in [69]:", + "bbox": [ + 111, + 301, + 883, + 330 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "You are given a story and a question. 
Answer the question in a paragraph.", + "bbox": [ + 112, + 338, + 740, + 356 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Story:", + "bbox": [ + 112, + 366, + 169, + 378 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "", + "bbox": [ + 112, + 381, + 178, + 395 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Question:", + "bbox": [ + 112, + 407, + 192, + 421 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "", + "bbox": [ + 112, + 422, + 202, + 436 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Answer:", + "bbox": [ + 112, + 449, + 176, + 462 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "B.2 Multimodal evaluation", + "text_level": 1, + "bbox": [ + 112, + 492, + 315, + 506 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "B.2.1 MMMU", + "text_level": 1, + "bbox": [ + 112, + 517, + 228, + 531 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "For multiple-choice questions:", + "bbox": [ + 112, + 541, + 316, + 556 + ], + "page_idx": 33 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "With the image, the following question, and the four possible answers (A, B, C and D), select the correct answer. (A) (B) ... (X) - For clear-cut questions: Give the answer directly with minimal elaboration. - For complex questions: Adopt this step-by-step method: ## Step 1: [Concise description] [Brief explanation] ## Step 2: [Concise description] [Brief explanation] In every scenario, conclude with: The best answer is [the_answer_letter]. where [the_answer_letter] is one of A, B, C or D. Let's proceed with a systematic approach", + "guess_lang": "txt", + "bbox": [ + 112, + 564, + 851, + 801 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "For open-ended questions:", + "bbox": [ + 112, + 818, + 290, + 834 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "With the image and the following question, provide a correct answer.", + "bbox": [ + 112, + 842, + 696, + 857 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "", + "bbox": [ + 112, + 857, + 202, + 869 + ], + "page_idx": 33 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- For clear-cut questions: Give the answer directly with minimal elaboration.", + "- For complex questions: Adopt this step-by-step method:" + ], + "bbox": [ + 112, + 883, + 772, + 912 + ], + "page_idx": 33 + }, + { + "type": "header", + "text": "The Amazon Nova Family of Models", + "bbox": [ + 370, + 42, + 620, + 56 + ], + "page_idx": 33 + }, + { + "type": "page_number", + "text": "34", + "bbox": [ + 485, + 935, + 504, + 946 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Step 1: [Concise description]", + "bbox": [ + 112, + 90, + 390, + 104 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "[Brief explanation]", + "bbox": [ + 117, + 106, + 276, + 119 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Step 2: [Concise description]", + "bbox": [ + 116, + 119, + 387, + 133 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "[Brief explanation]", + "bbox": [ + 117, + 133, + 276, + 147 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "In every scenario, conclude with: The best answer is [the_answer_phrase]. where [the_answer_phrase] is a concise and direct answer to the question Let's proceed with a systematic approach.", + "bbox": [ + 112, + 159, + 851, + 203 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "B.2.2 ChartQA, DocVQA, and TextVQA", + "text_level": 1, + "bbox": [ + 112, + 227, + 410, + 242 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "", + "bbox": [ + 112, + 253, + 202, + 266 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Answer the question using a single word or phrase.", + "bbox": [ + 112, + 267, + 540, + 281 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "B.2.3 VATEX", + "text_level": 1, + "bbox": [ + 112, + 306, + 222, + 319 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Render a clear and concise one-sentence summary of the video. The summary should be at least 10 words but no more than 20 words. Analyze the video first before summarizing it. Do not hallucinate objects.", + "bbox": [ + 111, + 330, + 866, + 375 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "B.2.4 EgoSchema", + "text_level": 1, + "bbox": [ + 112, + 398, + 250, + 412 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "You will be given a question about a video and three possible answer options. You will be provided frames from the video, sampled evenly across the video ", + "bbox": [ + 112, + 422, + 879, + 465 + ], + "page_idx": 34 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "(A) ", + "(B) ", + "(C)" + ], + "bbox": [ + 116, + 467, + 236, + 505 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Answer with the option's letter from the given choices directly.", + "bbox": [ + 112, + 507, + 660, + 521 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Answer with the option letter from the given choices directly.", + "bbox": [ + 112, + 521, + 643, + 535 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "B.2.5 VisualWebBench", + "text_level": 1, + "bbox": [ + 112, + 559, + 287, + 573 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "For the web captioning task:", + "bbox": [ + 112, + 584, + 303, + 598 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "You are given a screenshot of a webpage. Please generate the meta web description information of this webpage, i.e., content attribute in HTML element.", + "bbox": [ + 112, + 606, + 867, + 646 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "You should use this format, and do not output any explanation or any other contents: ", + "bbox": [ + 112, + 661, + 831, + 689 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "For the heading OCR task:", + "bbox": [ + 112, + 705, + 290, + 720 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "You are given a screenshot of a webpage. 
Please generate the main text within the screenshot, which can be regarded as the heading of the webpage.", + "bbox": [ + 112, + 729, + 808, + 758 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "You should directly tell me the first sentence of the main content, and do not output any explanation or any other contents.", + "bbox": [ + 112, + 770, + 877, + 799 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "For the web QA task:", + "bbox": [ + 112, + 815, + 256, + 830 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "", + "bbox": [ + 112, + 839, + 202, + 851 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "You should directly tell me your answer in the fewest words possible, and do not output any explanation or any other contents.", + "bbox": [ + 112, + 852, + 859, + 880 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "For the element OCR task:", + "bbox": [ + 112, + 897, + 290, + 910 + ], + "page_idx": 34 + }, + { + "type": "header", + "text": "The Amazon Nova Family of Models", + "bbox": [ + 370, + 42, + 620, + 56 + ], + "page_idx": 34 + }, + { + "type": "page_number", + "text": "35", + "bbox": [ + 486, + 935, + 504, + 946 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "You are given a screenshot of a webpage with a red rectangle bounding box. The [x1, y1, x2, y2] coordinates of the bounding box is .", + "bbox": [ + 112, + 99, + 859, + 130 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "Please perform OCR in the bounding box and recognize the text content within the red bounding box.", + "bbox": [ + 112, + 142, + 836, + 171 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "For the action prediction task:", + "bbox": [ + 112, + 189, + 313, + 204 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "You are given a screenshot of a webpage with a red rectangle bounding box. The [x1, y1, x2, y2] coordinates of the bounding box is .", + "bbox": [ + 112, + 213, + 859, + 241 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "Please select the best webpage description that matches the new webpage after clicking the selected element in the bounding box: ", + "bbox": [ + 112, + 242, + 852, + 282 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "You should directly tell me your choice in a single uppercase letter, and do not output any explanation or any other contents.", + "bbox": [ + 112, + 296, + 859, + 325 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "For the element grounding task:", + "bbox": [ + 112, + 344, + 326, + 359 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "In this website screenshot, I have labeled IDs for some HTML elements as candidates. Tell me which one best matches the description: ", + "bbox": [ + 112, + 368, + 879, + 396 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "You should directly tell me your choice in a single uppercase letter, and do not output any explanation or any other contents.", + "bbox": [ + 112, + 409, + 859, + 438 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "For the action grounding task:", + "bbox": [ + 112, + 455, + 313, + 470 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "In this website screenshot, I have labeled IDs for some HTML elements as candidates. 
Tell me which one I should click to complete the following task: ", + "bbox": [ + 112, + 481, + 877, + 510 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "You should directly tell me your choice in a single uppercase letter, and do not output any explanation or any other contents.", + "bbox": [ + 112, + 522, + 859, + 551 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "B.2.6 MM-Mind2Web", + "text_level": 1, + "bbox": [ + 112, + 579, + 284, + 593 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "Imagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click on an element with the mouse, select an option, type text or press Enter with the keyboard. (For your understanding, they are like the click(), select_option() type() functions in playwright respectively). One next step means one operation within the three.", + "bbox": [ + 112, + 607, + 879, + 705 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "You are asked to complete the following task: ", + "bbox": [ + 112, + 718, + 596, + 733 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "Previous Actions:", + "bbox": [ + 112, + 746, + 261, + 758 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "", + "bbox": [ + 112, + 760, + 271, + 773 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "The screenshot below shows the webpage you see.", + "bbox": [ + 112, + 773, + 516, + 787 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "Follow the following guidance to think step by step before outlining the next action step at the current stage:", + "bbox": [ + 112, + 814, + 879, + 843 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "(Current Webpage Identification)", + "bbox": [ + 112, + 854, + 388, + 869 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "Firstly, think about what the current webpage is.", + "bbox": [ + 112, + 869, + 532, + 883 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "(Previous Action Analysis)", + "bbox": [ + 114, + 897, + 338, + 912 + ], + "page_idx": 35 + }, + { + "type": "header", + "text": "The Amazon Nova Family of Models", + "bbox": [ + 370, + 42, + 620, + 56 + ], + "page_idx": 35 + }, + { + "type": "page_number", + "text": "36", + "bbox": [ + 485, + 935, + 504, + 946 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "Secondly, combined with the screenshot, analyze each step of the previous action history and their intention one by one. Particularly, pay more attention to the last step, which may be more related to what you should do now as the next step.", + "bbox": [ + 111, + 90, + 870, + 133 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "(Screenshot Details Analysis)", + "text_level": 1, + "bbox": [ + 114, + 146, + 362, + 161 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "Closely examine the screenshot to check the status of every part of the webpage to understand what you can operate with and what has been set or completed. You should closely examine the screenshot details to see what steps have been completed by previous actions even though you are given the textual previous actions. 
Because the textual history may not clearly and sufficiently record some effects of previous actions, you should closely evaluate the status of every part of the webpage to understand what you have done.", + "bbox": [ + 112, + 161, + 869, + 257 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "(Next Action Based on Webpage and Analysis)", + "text_level": 1, + "bbox": [ + 114, + 270, + 483, + 285 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "Then, based on your analysis, in conjunction with human web browsing habits and the logic of web design, decide on the following action. And clearly outline which element in the webpage users will operate with as the first next target element, its detailed location, and the corresponding operation.", + "bbox": [ + 112, + 285, + 877, + 340 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "To be successful, it is important to follow the following rules:", + "bbox": [ + 112, + 353, + 661, + 367 + ], + "page_idx": 36 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. You should only issue a valid action given the current observation.", + "2. You should only issue one action at a time." + ], + "bbox": [ + 114, + 368, + 712, + 393 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "(Reiteration)", + "text_level": 1, + "bbox": [ + 114, + 407, + 227, + 422 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "First, reiterate your next target element, its detailed location, and the corresponding operation.", + "bbox": [ + 112, + 422, + 861, + 450 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "(Multichoice Question)", + "text_level": 1, + "bbox": [ + 114, + 463, + 303, + 477 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "Below is a multi-choice question, where the choices are elements in the webpage. From the screenshot, find out where and what each one is on the webpage. Then, determine whether one matches your target element. Please examine the choices one by one. Choose the matching one. If multiple options match your answer, choose the most likely one by reexamining the screenshot, the choices, and your further reasoning.", + "bbox": [ + 112, + 477, + 877, + 547 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "If none of these elements match your target element, please select, select .", + "bbox": [ + 112, + 559, + 875, + 574 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "None of the other options match the correct element.", + "bbox": [ + 116, + 574, + 566, + 587 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": ". None of the other options match the correct element.", + "bbox": [ + 112, + 588, + 764, + 601 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "(Final Answer)Finally, conclude your answer using the format below. Ensure your answer is strictly adhering to the format provided below. Please do not leave any explanation in your answers of the final standardized format part, and this final part should be clear and certain. 
The element choice, action, and value should be in three separate lines.", + "bbox": [ + 112, + 614, + 877, + 671 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "Format:", + "text_level": 1, + "bbox": [ + 114, + 684, + 176, + 696 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "ELEMENT: The uppercase letter of your choice.", + "bbox": [ + 112, + 710, + 500, + 726 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "ACTION: Choose an action from {CLICK, TYPE, SELECT, NONE}. Use NONE only if you choose option F for the ELEMENT", + "bbox": [ + 112, + 738, + 852, + 767 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "VALUE: Provide additional input based on ACTION.", + "bbox": [ + 112, + 780, + 524, + 794 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "The VALUE means:", + "text_level": 1, + "bbox": [ + 112, + 808, + 251, + 821 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "If ACTION == TYPE, specify the text to be typed.", + "bbox": [ + 112, + 821, + 524, + 835 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "If ACTION == SELECT, specify the option to be chosen.", + "bbox": [ + 112, + 835, + 566, + 849 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "If ACTION == CLICK, write \"None\".", + "bbox": [ + 112, + 849, + 395, + 862 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "B.2.7 GroundUI-1K", + "text_level": 1, + "bbox": [ + 112, + 892, + 269, + 907 + ], + "page_idx": 36 + }, + { + "type": "header", + "text": "The Amazon Nova Family of Models", + "bbox": [ + 370, + 42, + 620, + 56 + ], + "page_idx": 36 + }, + { + "type": "page_number", + "text": "37", + "bbox": [ + 485, + 935, + 504, + 946 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "Which action should I do if I want to Click on and where is the action? Express the location coordinates using the (x1, y1, x2, y2) format, scaled between 0 and 1000.", + "bbox": [ + 112, + 99, + 879, + 130 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "B.3 Functional Capabilities", + "text_level": 1, + "bbox": [ + 112, + 156, + 320, + 171 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "B.3.1 FinQA", + "text_level": 1, + "bbox": [ + 112, + 181, + 217, + 196 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "Given the following finance question, analyze the question in details step-by-step before giving the final answer. Your answer should begin with \"Lets think step-by-step\". Your response should end with \"The answer is [the_final_answer]\", where [the_final_answer] should be the most concise answer without any explanation.", + "bbox": [ + 112, + 207, + 879, + 263 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "We use regex \"The answer is (.*)\" to extract the answer. We convert answers with percent signs and magnitude terms to decimal numerical representation (e.g. convert $1.3\\%$ to 0.013 and \"5.2 millions\" to 5,200,000). 
An answer is correct if it is identical to the ground truth when rounded to the same decimal places.", + "bbox": [ + 109, + 405, + 883, + 448 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "B.3.2 RAG", + "text_level": 1, + "bbox": [ + 112, + 460, + 205, + 474 + ], + "page_idx": 37 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "You are a teacher grading a quiz. \nYou are given a question, the student's answer, and the true answer, and are asked to score the student answer as either Correct or Incorrect. \nExample Format: \nQUESTION: question here \nSTUDENT ANSWER: student's answer here \nTRUE ANSWER: true answer here \nGRADE: Correct or Incorrect here \nGrade the student answers based ONLY on their factual accuracy. Ignore differences in punctuation and phrasing between the student answer and true answer. It is OK if the student answer contains more information than the true answer, as long as it does not contain any conflicting statements. Begin! \nQUESTION: {query} \nSTUDENT ANSWER: {answer} \nTRUE ANSWER: {expected_answer} \nGRADE: \nYour response should be in json format as follows: \n{{ \"justification\": (Without mentioning the student/teacher framing of this prompt, explain why the STUDENT ANSWER is Correct or Incorrect. Use one or two sentences maximum. Keep the answer as concise as possible.), \"grade\": (correct or incorrect) \n}}", + "guess_lang": "txt", + "bbox": [ + 112, + 488, + 879, + 806 + ], + "page_idx": 37 + }, + { + "type": "header", + "text": "The Amazon Nova Family of Models", + "bbox": [ + 370, + 42, + 620, + 56 + ], + "page_idx": 37 + }, + { + "type": "page_number", + "text": "38", + "bbox": [ + 486, + 935, + 504, + 946 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "C Qualitative examples of multimodal intelligence", + "text_level": 1, + "bbox": [ + 112, + 89, + 550, + 108 + ], + "page_idx": 38 + }, + { + "type": "image", + "img_path": "images/b6fee84e3dec1c634c54caa6be6cd5718dbb5c0e2717596731f7713ff1f6cffc.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 135, + 138, + 158, + 156 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "User input", + "text_level": 1, + "bbox": [ + 166, + 140, + 256, + 157 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "This looks like an amazing site. What is it and where is it located? Are there other fun things around this site to explore?", + "bbox": [ + 130, + 178, + 468, + 222 + ], + "page_idx": 38 + }, + { + "type": "image", + "img_path": "images/40ed9725a10bde768a14e4c23a1bbeaa48fa7bb2624851ec2ecc717d04925b21.jpg", + "image_caption": [ + "Figure 6: Created with Nova Pro. Photo taken by a team member." + ], + "image_footnote": [], + "bbox": [ + 174, + 224, + 433, + 474 + ], + "page_idx": 38 + }, + { + "type": "image", + "img_path": "images/8a0926b7ab13709954d88c169fa4b58825725dd4d4f17f64bb0c74906113cf3a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 521, + 138, + 542, + 156 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Response", + "text_level": 1, + "bbox": [ + 549, + 140, + 630, + 157 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "This is the Space Needle, a landmark in Seattle, Washington, USA. It is a well-known observation tower and a symbol of Seattle. It was built for the 1962 World's Fair and has since become a popular tourist attraction. 
The Space Needle offers panoramic views of the city, the surrounding mountains, and the Puget Sound.", + "bbox": [ + 519, + 178, + 848, + 280 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "The Space Needle is located in the Seattle Center, which is a cultural and entertainment hub. The Seattle Center is home to many other attractions, including the Chihuly Garden and Glass, the Museum of Pop Culture, and the Pacific Science Center. The Seattle Center also hosts events like concerts, festivals, and art exhibitions. The Seattle Center is also close to other popular attractions in Seattle, such as the Pike Place Market, the Seattle Aquarium, and the Seattle Art Museum. The city is known for its vibrant arts and culture scene, outdoor activities, and beautiful natural surroundings.", + "bbox": [ + 519, + 281, + 859, + 470 + ], + "page_idx": 38 + }, + { + "type": "header", + "text": "The Amazon Nova Family of Models", + "bbox": [ + 370, + 42, + 620, + 56 + ], + "page_idx": 38 + }, + { + "type": "page_number", + "text": "39", + "bbox": [ + 485, + 935, + 504, + 946 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "User input: \"Search for Psych and buy the first episode\"", + "text_level": 1, + "bbox": [ + 130, + 155, + 542, + 170 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "Agent step 1", + "text_level": 1, + "bbox": [ + 122, + 191, + 209, + 203 + ], + "page_idx": 39 + }, + { + "type": "image", + "img_path": "images/f62b7b8f0fa10516c22fcbbd7182f1adbd2b48c22b345d2038ebbd505fb4475e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 116, + 205, + 493, + 500 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "Agent step 2", + "text_level": 1, + "bbox": [ + 513, + 191, + 601, + 203 + ], + "page_idx": 39 + }, + { + "type": "image", + "img_path": "images/40017c96e300bbe4f19d2bff766d3a5a22bc1aac134344ab06f81425b38ce48d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 506, + 205, + 883, + 498 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "Agent step 3", + "text_level": 1, + "bbox": [ + 122, + 522, + 210, + 534 + ], + "page_idx": 39 + }, + { + "type": "image", + "img_path": "images/e2b9071e056e37ac4eb9d430bd3f923d778aeea4fa88dcd30d9470cf968999bb.jpg", + "image_caption": [ + "Figure 7: Example of a multimodal agent." + ], + "image_footnote": [], + "bbox": [ + 117, + 537, + 493, + 829 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "Agent step 4", + "text_level": 1, + "bbox": [ + 513, + 522, + 602, + 534 + ], + "page_idx": 39 + }, + { + "type": "image", + "img_path": "images/b32c411bb61018a8cbea165020cca783de3eec6926bb4ae55ba05b11185084fc.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 506, + 537, + 883, + 829 + ], + "page_idx": 39 + }, + { + "type": "header", + "text": "The Amazon Nova Family of Models", + "bbox": [ + 370, + 42, + 620, + 56 + ], + "page_idx": 39 + }, + { + "type": "page_number", + "text": "40", + "bbox": [ + 485, + 935, + 504, + 946 + ], + "page_idx": 39 + }, + { + "type": "image", + "img_path": "images/d0909d37150b6e506c3322688439c181301d0021cd13c9fc9c174a56e6668fa5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 135, + 332, + 156, + 349 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "User input", + "text_level": 1, + "bbox": [ + 166, + 333, + 253, + 349 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "What happened to my tire? 
What should I do?", + "bbox": [ + 132, + 369, + 308, + 398 + ], + "page_idx": 40 + }, + { + "type": "image", + "img_path": "images/ac9cea2d55627a6fb2b6475d2b322f234e90b3b7231e196768db56042240a551.jpg", + "image_caption": [ + "Figure 8: Created with Nova Pro. Image source: [88]." + ], + "image_footnote": [], + "bbox": [ + 158, + 407, + 385, + 640 + ], + "page_idx": 40 + }, + { + "type": "image", + "img_path": "images/4182d1ad86670b1b607652228cf65789d3f9bb027a78f59a88228ade9b455e2d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 459, + 333, + 478, + 349 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Response", + "text_level": 1, + "bbox": [ + 485, + 334, + 563, + 349 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "It appears that your tire has a significant puncture, as evidenced by the deflated state and visible damage. Here's what you should do:", + "bbox": [ + 457, + 371, + 830, + 412 + ], + "page_idx": 40 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Assess the Damage: Confirm the puncture by checking for any visible holes or debris embedded in the tire.", + "2. Check Tire Pressure: Ensure the tire is completely deflated.", + "3. Remove the Tire: If you're comfortable, you can remove the wheel to inspect the puncture more closely.", + "4. Temporary Fix: Use a tire repair kit to patch the hole if it's small. This is a temporary solution.", + "5. Professional Repair: Visit a tire shop for a proper repair or replacement. They can assess if the tire can be patched or if a new tire is needed.", + "6. Inspect Other Tires: Check the other tires for any signs of damage or wear." + ], + "bbox": [ + 457, + 414, + 854, + 585 + ], + "page_idx": 40 + }, + { + "type": "text", + "text": "Would you like more detailed instructions on any of these steps?", + "bbox": [ + 457, + 585, + 821, + 613 + ], + "page_idx": 40 + }, + { + "type": "header", + "text": "The Amazon Nova Family of Models", + "bbox": [ + 370, + 42, + 620, + 56 + ], + "page_idx": 40 + }, + { + "type": "page_number", + "text": "41", + "bbox": [ + 485, + 935, + 503, + 946 + ], + "page_idx": 40 + }, + { + "type": "image", + "img_path": "images/11c83a4fc91e912143be2cb657581d8025a865186a75371afd4135014166cf8f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 135, + 99, + 158, + 118 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "User input", + "text_level": 1, + "bbox": [ + 166, + 101, + 256, + 118 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Group this sales data by month using pandas and then create a bar chart of the sales per month.", + "bbox": [ + 130, + 138, + 452, + 169 + ], + "page_idx": 41 + }, + { + "type": "table", + "img_path": "images/e89c885cecd0fdc170940dc1b4a7578c536bfa8bd858aa091078589ec47fa28a.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Month | Sales
Jan | 1523.13
Jan | 957
Feb | 133
March | 452
May | 521
June | 901
July | 1334
July | 131
Aug | 2601
Dec | 3005
", + "bbox": [ + 218, + 174, + 390, + 349 + ], + "page_idx": 41 + }, + { + "type": "image", + "img_path": "images/14520d6be03d8ce44c8e422a387e8ae19c3963854102147343ff7ee1426a2348.jpg", + "image_caption": [ + "Generated plot:" + ], + "image_footnote": [], + "bbox": [ + 158, + 388, + 449, + 530 + ], + "page_idx": 41 + }, + { + "type": "image", + "img_path": "images/aa02909025c79a2e3bc10826fb45b862b4bcc1dd1005746e68c4bf07a0a94712.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 522, + 101, + 542, + 117 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Response", + "text_level": 1, + "bbox": [ + 549, + 102, + 630, + 118 + ], + "page_idx": 41 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "import pandas as pd \nimport matplotlib.pyplot as plt \n# Data \ndata $=$ { Month':['Jan','Jan','Feb','March','May','June','July' July','Aug','Dec'], Sales':[1523.13,957,133,452,521,901,1334,131,2601,3005] } \n# Create DataFrame df $=$ pd.DataFrame(data) \n# Group by month and sum sales monthly_sales $=$ df.groupby('Month')[Sales'].sum().reset_index() \n# Sort months based on calendar monthly_sales['Month'] $=$ pd.Categorical( monthly_sales['Month'], categories $\\coloneqq$ ['Jan','Feb','March','April','May','June','July', Aug,'Sept','Oct','Nov','Dec'], ordered=True \nmonthsales $=$ monthly_sales.sort_values('Month') \n# Plot bar chart plt.figure(figsize=(10,6)) plt.bar(monthly_sales['Month'], monthly_sales['Sales'], color $\\equiv$ 'blue') plt.xlabel('Month') pltylabel('Total Sales') plt.title('Total Sales per Month') plt.show()", + "guess_lang": "python", + "bbox": [ + 519, + 136, + 864, + 444 + ], + "page_idx": 41 + }, + { + "type": "image", + "img_path": "images/ca75d134a16e3155d8778b1af440402be2f8402314bc6cd2b56a644be95182cb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 135, + 575, + 158, + 594 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "User input", + "text_level": 1, + "bbox": [ + 166, + 577, + 256, + 594 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "How many people say the European Union responded well to the pandemic?", + "bbox": [ + 130, + 614, + 410, + 645 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Poor marks for America's response to coronavirus outbreak", + "text_level": 1, + "bbox": [ + 176, + 667, + 411, + 691 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "% who say each has done a job dealing with the coronavirus outbreak", + "bbox": [ + 176, + 695, + 403, + 715 + ], + "page_idx": 41 + }, + { + "type": "image", + "img_path": "images/e537c2f1ffe8ae9930b85ad5babd789496b6b51b9653a1c9bf7e807992b5257a.jpg", + "image_caption": [ + "Figure 10: Created with Nova Lite. Image source: ChartQA dataset [50]." + ], + "image_footnote": [], + "bbox": [ + 178, + 719, + 424, + 803 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Note: Percentages are medians based on 13 countries. Those who did not answer are not shown. Source: Summer 2020 Global Attitudes Survey. Q10a-e. U.S. Image Plummets Internationally as Most Say Country Has Handled Coronavirus Badly", + "bbox": [ + 176, + 806, + 423, + 849 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "PEW RESEARCH CENTER", + "bbox": [ + 178, + 852, + 272, + 861 + ], + "page_idx": 41 + }, + { + "type": "image", + "img_path": "images/ac91511bde96ff7f67aab4256d93ae2e68faf1e7ea3de954ee8f09249bf14491.jpg", + "image_caption": [ + "Figure 9: Created with Nova Pro." 
+ ], + "image_footnote": [], + "bbox": [ + 522, + 577, + 542, + 594 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "Response", + "text_level": 1, + "bbox": [ + 549, + 578, + 630, + 595 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "The chart shows that $57\\%$ of people say the European Union responded well to the pandemic. This is indicated by the green bar labeled \"Good\" for the EU, which extends to a height of $57\\%$ . This suggests that a majority of the people surveyed had a positive view of the EU's response to the pandemic.", + "bbox": [ + 519, + 614, + 859, + 717 + ], + "page_idx": 41 + }, + { + "type": "header", + "text": "The Amazon Nova Family of Models", + "bbox": [ + 370, + 42, + 620, + 56 + ], + "page_idx": 41 + }, + { + "type": "page_number", + "text": "42", + "bbox": [ + 485, + 935, + 504, + 946 + ], + "page_idx": 41 + }, + { + "type": "text", + "text": "D Correspondence and Contributors", + "text_level": 1, + "bbox": [ + 112, + 89, + 439, + 107 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Please direct all correspondences to:", + "bbox": [ + 112, + 119, + 354, + 133 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "nova-technical-report@amazon.com", + "bbox": [ + 343, + 150, + 653, + 166 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "The Nova family of models were built by the Amazon Artificial General Intelligence (AGI) organization and partner teams.", + "bbox": [ + 109, + 181, + 883, + 209 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "When citing this report, please use \"Amazon AGI\" as the sole author, as shown in the bibtex entry below.", + "bbox": [ + 111, + 215, + 802, + 232 + ], + "page_idx": 42 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "@misc{novatechreport, author = {Amazon AGI}, title = {The Amazon Nova Family of Models: Technical Report and Model Card}, year = {2024}, url = {https://www.amazon.science/publications/the-amazon-nova-family-of-models-technical-report-and-model-card} }", + "guess_lang": "bib", + "bbox": [ + 112, + 238, + 815, + 335 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "D.1 Contributors", + "text_level": 1, + "bbox": [ + 112, + 364, + 250, + 378 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "The following individuals worked in the Nova program for at least one-fifth of its duration and measurably impacted one or more of the models or services described in this report.", + "bbox": [ + 111, + 388, + 883, + 417 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Aaron Langford", + "bbox": [ + 112, + 431, + 223, + 446 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Aayush Shah", + "bbox": [ + 112, + 446, + 202, + 459 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Abhanshu Gupta", + "bbox": [ + 114, + 460, + 225, + 474 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Abhimanyu Bhatter", + "bbox": [ + 114, + 474, + 245, + 487 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Abhinav Goyal", + "bbox": [ + 114, + 488, + 215, + 501 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Abhinav Mathur", + "bbox": [ + 114, + 502, + 225, + 513 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Abhinav Mohanty", + "bbox": [ + 114, + 516, + 235, + 529 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Abhishek Kumar", + "bbox": [ + 114, + 530, + 228, + 541 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Abhishek Sethi", 
+ "bbox": [ + 114, + 542, + 215, + 554 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Abi Komma", + "bbox": [ + 114, + 556, + 197, + 569 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Abner Pena", + "bbox": [ + 114, + 571, + 192, + 582 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Achin Jain", + "bbox": [ + 114, + 584, + 186, + 595 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Adam Kunysz", + "bbox": [ + 114, + 599, + 210, + 611 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Adam Opyrchal", + "bbox": [ + 114, + 612, + 220, + 625 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Adarsh Singh", + "bbox": [ + 114, + 626, + 205, + 638 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Aditya Rawal", + "bbox": [ + 114, + 640, + 205, + 652 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Adok Achar Budihal Prasad", + "bbox": [ + 114, + 652, + 299, + 665 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Adrià de Gispert", + "bbox": [ + 114, + 667, + 225, + 679 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Agnika Kumar", + "bbox": [ + 114, + 681, + 212, + 694 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Aishwarya Aryamane", + "bbox": [ + 114, + 695, + 259, + 708 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Ajay Nair", + "bbox": [ + 114, + 709, + 181, + 722 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Akilan M", + "bbox": [ + 114, + 723, + 179, + 734 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Akshaya Iyengar", + "bbox": [ + 114, + 737, + 227, + 750 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Akshaya Vishnu Kudlu Shanbhogue", + "bbox": [ + 114, + 750, + 352, + 763 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Alan He", + "bbox": [ + 114, + 763, + 171, + 775 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Alessandra Cervone", + "bbox": [ + 114, + 777, + 248, + 789 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Alex Loeb", + "bbox": [ + 114, + 791, + 186, + 801 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Alex Zhang", + "bbox": [ + 114, + 804, + 194, + 816 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Alexander Fu", + "bbox": [ + 114, + 819, + 205, + 830 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Alexander Lisnichenko", + "bbox": [ + 114, + 832, + 267, + 844 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Alexander Zhipa", + "bbox": [ + 114, + 845, + 225, + 859 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Alexandros Potamianos", + "bbox": [ + 114, + 861, + 271, + 872 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Ali Kebarighotbi", + "bbox": [ + 114, + 875, + 225, + 887 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Aliakbar Daronkolaei", + "bbox": [ + 114, + 888, + 256, + 898 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Alok Parmesh", + "bbox": [ + 374, + 431, + 470, + 445 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Amanjot Kaur Samra", + "bbox": [ + 374, + 446, + 516, + 459 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Ameen Khan", + "bbox": [ + 375, + 460, + 462, + 472 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Amer Rez", + "bbox": [ + 375, + 474, + 442, + 486 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Amir Saffari", + "bbox": [ + 375, + 488, + 459, + 500 + ], + "page_idx": 42 + }, + { + 
"type": "text", + "text": "Amit Agarwalla", + "bbox": [ + 375, + 502, + 480, + 513 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Amit Jhindal", + "bbox": [ + 375, + 516, + 460, + 527 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Amith Mamidala", + "bbox": [ + 375, + 529, + 488, + 541 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Ammar Asmro", + "bbox": [ + 375, + 542, + 473, + 554 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Amulya Ballakur", + "bbox": [ + 375, + 556, + 490, + 569 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Anand Mishra", + "bbox": [ + 375, + 571, + 470, + 582 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Anand Sridharan", + "bbox": [ + 375, + 584, + 486, + 595 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Anastasiia Dubinina", + "bbox": [ + 375, + 599, + 509, + 609 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Andre Lenz", + "bbox": [ + 375, + 612, + 455, + 625 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Andreas Doerr", + "bbox": [ + 375, + 626, + 473, + 637 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Andrew Keating", + "bbox": [ + 375, + 640, + 483, + 652 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Andrew Leaver", + "bbox": [ + 375, + 654, + 477, + 665 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Andrew Smith", + "bbox": [ + 375, + 667, + 472, + 679 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Andrew Wirth", + "bbox": [ + 375, + 680, + 470, + 691 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Andy Davey", + "bbox": [ + 375, + 695, + 457, + 707 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Andy Rosenbaum", + "bbox": [ + 375, + 709, + 493, + 720 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Andy Sohn", + "bbox": [ + 375, + 722, + 450, + 734 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Angela Chan", + "bbox": [ + 375, + 736, + 462, + 750 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Aniket Chakrabarti", + "bbox": [ + 375, + 750, + 501, + 762 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Anil Ramakrishna", + "bbox": [ + 375, + 763, + 496, + 775 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Anirban Roy", + "bbox": [ + 375, + 777, + 460, + 790 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Anita Iyer", + "bbox": [ + 375, + 792, + 442, + 804 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Anjali Narayan-Chen", + "bbox": [ + 375, + 805, + 516, + 816 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Ankith Yennu", + "bbox": [ + 375, + 819, + 467, + 830 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Anna Dabrowska", + "bbox": [ + 375, + 832, + 490, + 844 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Anna Gawlowska", + "bbox": [ + 375, + 845, + 493, + 858 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Anna Rumshisky", + "bbox": [ + 375, + 859, + 488, + 873 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Anna Turek", + "bbox": [ + 375, + 875, + 454, + 885 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Anoop Deoras", + "bbox": [ + 375, + 887, + 470, + 901 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Anton Bezruchkin", + "bbox": [ + 635, + 431, + 756, + 445 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Anup Prasad", + "bbox": [ + 635, + 446, + 720, + 459 + ], + "page_idx": 
42 + }, + { + "type": "text", + "text": "Anupam Dewan", + "bbox": [ + 635, + 460, + 743, + 474 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Anwith Kiran", + "bbox": [ + 635, + 474, + 727, + 486 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Apoory Gupta", + "bbox": [ + 635, + 488, + 730, + 501 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Aram Galstyan", + "bbox": [ + 635, + 502, + 735, + 515 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Aravind Manoharan", + "bbox": [ + 635, + 516, + 767, + 527 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Arijit Biswas", + "bbox": [ + 635, + 529, + 723, + 542 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Arindam Mandal", + "bbox": [ + 635, + 542, + 748, + 555 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Arpit Gupta", + "bbox": [ + 635, + 556, + 715, + 569 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Arsamkhan Pathan", + "bbox": [ + 635, + 571, + 759, + 583 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Arun Nagarajan", + "bbox": [ + 635, + 584, + 741, + 598 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Arushan Rajasekaram", + "bbox": [ + 635, + 599, + 781, + 611 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Arvind Sundararajan", + "bbox": [ + 635, + 612, + 772, + 625 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Ashwin Ganesan", + "bbox": [ + 635, + 626, + 746, + 637 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Ashwin Swaminathan", + "bbox": [ + 635, + 638, + 779, + 652 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Athanasios Mouchtaris", + "bbox": [ + 635, + 652, + 787, + 665 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Audrey Champeau", + "bbox": [ + 635, + 667, + 759, + 680 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Avik Ray", + "bbox": [ + 635, + 681, + 699, + 694 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Ayush Jaiswal", + "bbox": [ + 635, + 695, + 730, + 707 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Ayush Sharma", + "bbox": [ + 635, + 709, + 732, + 720 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Bailey Keefer", + "bbox": [ + 635, + 722, + 728, + 734 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Balamurugan Muthiah", + "bbox": [ + 635, + 736, + 784, + 750 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Beatrix Leon-Millan", + "bbox": [ + 635, + 750, + 771, + 762 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Ben Koopman", + "bbox": [ + 635, + 763, + 730, + 776 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Ben Li", + "bbox": [ + 635, + 777, + 681, + 789 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Benjamin Biggs", + "bbox": [ + 635, + 791, + 743, + 804 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Benjamin Ott", + "bbox": [ + 635, + 805, + 725, + 816 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Bhanu Vinzamuri", + "bbox": [ + 635, + 819, + 751, + 830 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Bharath Venkatesh", + "bbox": [ + 635, + 832, + 759, + 844 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Bhavana Ganesh", + "bbox": [ + 635, + 845, + 746, + 858 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Bhoomit Vasani", + "bbox": [ + 635, + 859, + 741, + 871 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Bill Byrne", + "bbox": [ + 
635, + 875, + 705, + 886 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Bill Hsu", + "bbox": [ + 635, + 887, + 692, + 898 + ], + "page_idx": 42 + }, + { + "type": "header", + "text": "The Amazon Nova Family of Models", + "bbox": [ + 370, + 42, + 620, + 56 + ], + "page_idx": 42 + }, + { + "type": "page_number", + "text": "43", + "bbox": [ + 485, + 935, + 504, + 946 + ], + "page_idx": 42 + }, + { + "type": "text", + "text": "Bincheng Wang", + "bbox": [ + 112, + 90, + 222, + 106 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Blake King", + "bbox": [ + 116, + 106, + 191, + 119 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Blazej Gorny", + "bbox": [ + 116, + 119, + 204, + 133 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Bo Feng", + "bbox": [ + 116, + 133, + 173, + 147 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Bo Zheng", + "bbox": [ + 116, + 147, + 181, + 160 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Bodhisattwa Paul", + "bbox": [ + 116, + 161, + 230, + 172 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Bofan Sun", + "bbox": [ + 116, + 175, + 186, + 186 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Bofeng Luo", + "bbox": [ + 116, + 188, + 194, + 200 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Bowen Chen", + "bbox": [ + 116, + 202, + 200, + 214 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Bowen Xie", + "bbox": [ + 116, + 215, + 189, + 227 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Boya Yu", + "bbox": [ + 116, + 229, + 173, + 243 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Brendan Jugan", + "bbox": [ + 116, + 244, + 212, + 256 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Brett Panosh", + "bbox": [ + 116, + 257, + 200, + 268 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Brian Collins", + "bbox": [ + 116, + 271, + 204, + 282 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Brian Thompson", + "bbox": [ + 116, + 285, + 225, + 297 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Can Karakus", + "bbox": [ + 116, + 299, + 200, + 311 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Can Liu", + "bbox": [ + 116, + 313, + 169, + 324 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Carl Lambrecht", + "bbox": [ + 116, + 325, + 218, + 338 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Carly Lin", + "bbox": [ + 116, + 340, + 179, + 352 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Carolyn Wang", + "bbox": [ + 116, + 354, + 210, + 367 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Carrie Yuan", + "bbox": [ + 116, + 368, + 194, + 380 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Casey Loyda", + "bbox": [ + 116, + 382, + 202, + 393 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Cezary Walczak", + "bbox": [ + 116, + 395, + 222, + 407 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Chalapathi Choppa", + "bbox": [ + 116, + 409, + 241, + 422 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Chandana Satya Prakash", + "bbox": [ + 116, + 422, + 277, + 435 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Chankrisna Richy Meas", + "bbox": [ + 116, + 436, + 272, + 449 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Charith Peris", + "bbox": [ + 116, + 450, + 202, + 463 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Charles Recaido", + "bbox": [ + 116, 
+ 464, + 223, + 476 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Charlie Xu", + "bbox": [ + 116, + 478, + 187, + 489 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Charul Sharma", + "bbox": [ + 116, + 491, + 214, + 503 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Chase Kernan", + "bbox": [ + 116, + 505, + 207, + 517 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Chayut Thanapirom", + "bbox": [ + 116, + 518, + 246, + 532 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Chengwei Su", + "bbox": [ + 116, + 532, + 204, + 545 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Chenhao Xu", + "bbox": [ + 116, + 547, + 197, + 558 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Chenhao Yin", + "bbox": [ + 116, + 561, + 202, + 571 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Chentao Ye", + "bbox": [ + 116, + 574, + 192, + 585 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Chenyang Tao", + "bbox": [ + 116, + 588, + 210, + 601 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Chethan Parameshwara", + "bbox": [ + 116, + 602, + 269, + 614 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Ching-Yun Chang", + "bbox": [ + 116, + 614, + 235, + 628 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Chong Li", + "bbox": [ + 116, + 630, + 178, + 642 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Chris Hench", + "bbox": [ + 116, + 643, + 197, + 655 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Chris Tran", + "bbox": [ + 116, + 657, + 186, + 667 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Christophe Dupuy", + "bbox": [ + 116, + 671, + 236, + 684 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Christopher Davis", + "bbox": [ + 116, + 685, + 235, + 698 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Christopher DiPersio", + "bbox": [ + 116, + 699, + 254, + 710 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Christos Christodoulopoulos", + "bbox": [ + 116, + 712, + 303, + 724 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Christy Li", + "bbox": [ + 116, + 726, + 183, + 738 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Chun Chen", + "bbox": [ + 116, + 739, + 189, + 751 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Claudio Delli Bovi", + "bbox": [ + 116, + 753, + 240, + 765 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Clement Chung", + "bbox": [ + 116, + 767, + 220, + 781 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Cole Hawkins", + "bbox": [ + 116, + 781, + 209, + 792 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Connor Harris", + "bbox": [ + 116, + 795, + 210, + 806 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Corey Ropell", + "bbox": [ + 116, + 809, + 202, + 821 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Cynthia He", + "bbox": [ + 116, + 821, + 192, + 835 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "DK Joo", + "bbox": [ + 116, + 835, + 166, + 847 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Dae Yon Hwang", + "bbox": [ + 116, + 849, + 223, + 863 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Dan Rosen", + "bbox": [ + 116, + 864, + 189, + 875 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Daniel Elkind", + "bbox": [ + 116, + 877, + 207, + 888 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Daniel Pressel", + "bbox": 
[ + 116, + 891, + 210, + 902 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Daniel Zhang", + "bbox": [ + 375, + 90, + 467, + 106 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Danielle Kimball", + "bbox": [ + 375, + 106, + 488, + 118 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Daniil Sorokin", + "bbox": [ + 375, + 119, + 473, + 131 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Dave Goodell", + "bbox": [ + 375, + 133, + 467, + 145 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Davide Modolo", + "bbox": [ + 375, + 147, + 478, + 157 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Dawei Zhu", + "bbox": [ + 375, + 160, + 450, + 172 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Deepikaa Suresh", + "bbox": [ + 375, + 174, + 486, + 186 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Deepti Raga", + "bbox": [ + 375, + 188, + 465, + 200 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Denis Filimonov", + "bbox": [ + 375, + 202, + 486, + 214 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Denis Foo Kune", + "bbox": [ + 375, + 215, + 483, + 227 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Denis Romasanta Rodriguez", + "bbox": [ + 375, + 229, + 563, + 243 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Devamanyu Hazarika", + "bbox": [ + 375, + 244, + 517, + 256 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Dhananjay Ram", + "bbox": [ + 375, + 257, + 482, + 270 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Dhawal Parkar", + "bbox": [ + 375, + 271, + 473, + 282 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Dhawal Patel", + "bbox": [ + 375, + 285, + 464, + 297 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Dhwanil Desai", + "bbox": [ + 375, + 299, + 473, + 310 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Dinesh Singh Rajput", + "bbox": [ + 375, + 313, + 513, + 325 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Disha Sule", + "bbox": [ + 375, + 327, + 449, + 338 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Diwakar Singh", + "bbox": [ + 375, + 340, + 475, + 353 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Dmitriy Genzel", + "bbox": [ + 375, + 354, + 478, + 366 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Dolly Goldenberg", + "bbox": [ + 375, + 368, + 495, + 380 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Dongyi He", + "bbox": [ + 375, + 382, + 449, + 393 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Dumitru Hanciu", + "bbox": [ + 375, + 395, + 483, + 407 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Dushan Tharmal", + "bbox": [ + 375, + 409, + 486, + 421 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Dzmitry Siankovich", + "bbox": [ + 375, + 422, + 508, + 435 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Edi Cikovic", + "bbox": [ + 375, + 436, + 455, + 448 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Edwin Abraham", + "bbox": [ + 375, + 450, + 483, + 462 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Ekraam Sabir", + "bbox": [ + 375, + 464, + 467, + 476 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Elliott Olson", + "bbox": [ + 375, + 478, + 460, + 489 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Emmett Steven", + "bbox": [ + 375, + 491, + 475, + 503 + ], + "page_idx": 43 + }, + { + "type": "text", + 
"text": "Emre Barut", + "bbox": [ + 375, + 505, + 452, + 517 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Eric Jackson", + "bbox": [ + 375, + 518, + 460, + 531 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Ethan Wu", + "bbox": [ + 375, + 532, + 441, + 545 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Evelyn Chen", + "bbox": [ + 375, + 547, + 460, + 560 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Ezhilan Mahalingam", + "bbox": [ + 375, + 561, + 513, + 573 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Fabian Triefenbach", + "bbox": [ + 375, + 574, + 503, + 585 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Fan Yang", + "bbox": [ + 375, + 588, + 439, + 601 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Fangyu Liu", + "bbox": [ + 375, + 602, + 452, + 614 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Fanzi Wu", + "bbox": [ + 375, + 616, + 439, + 627 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Faraz Tavakoli", + "bbox": [ + 375, + 628, + 473, + 641 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Farhad Khozeimeh", + "bbox": [ + 375, + 643, + 501, + 655 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Feiyang Niu", + "bbox": [ + 375, + 657, + 457, + 670 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Felix Hieber", + "bbox": [ + 375, + 671, + 459, + 683 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Feng Li", + "bbox": [ + 375, + 685, + 428, + 696 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "First Elbey", + "bbox": [ + 375, + 699, + 450, + 710 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Florian Krebs", + "bbox": [ + 375, + 712, + 467, + 724 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Florian Saupe", + "bbox": [ + 375, + 726, + 467, + 739 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Florian Sprunken", + "bbox": [ + 375, + 739, + 490, + 753 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Frank Fan", + "bbox": [ + 375, + 753, + 442, + 765 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Furqan Khan", + "bbox": [ + 375, + 767, + 462, + 780 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Gabriela De Vincenzo", + "bbox": [ + 375, + 781, + 522, + 792 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Gagandeep Kang", + "bbox": [ + 375, + 794, + 490, + 808 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "George Ding", + "bbox": [ + 375, + 809, + 460, + 821 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "George He", + "bbox": [ + 375, + 821, + 447, + 835 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "George Yeung", + "bbox": [ + 375, + 835, + 470, + 849 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Ghada Qaddoumi", + "bbox": [ + 375, + 849, + 491, + 862 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Giannis Karamanolakis", + "bbox": [ + 375, + 864, + 529, + 875 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Goeric Huybrechts", + "bbox": [ + 375, + 877, + 501, + 890 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Gokul Maddali", + "bbox": [ + 375, + 891, + 475, + 902 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Gonzalo Iglesias", + "bbox": [ + 635, + 90, + 746, + 104 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Gordon McShane", + "bbox": [ + 635, + 106, + 753, + 118 + ], + "page_idx": 43 + }, + { + "type": 
"text", + "text": "Gozde Sahin", + "bbox": [ + 635, + 119, + 720, + 131 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Guangtai Huang", + "bbox": [ + 635, + 133, + 745, + 146 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Gukyeong Kwon", + "bbox": [ + 635, + 147, + 748, + 160 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Gunnar A. Sigurdsson", + "bbox": [ + 635, + 161, + 782, + 174 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Gurpreet Chadha", + "bbox": [ + 635, + 175, + 750, + 186 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Gururaj Kosuru", + "bbox": [ + 635, + 188, + 738, + 202 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Hagen Fuerstenau", + "bbox": [ + 635, + 203, + 754, + 215 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Hah Hah", + "bbox": [ + 635, + 215, + 696, + 227 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Haja Maideen", + "bbox": [ + 635, + 229, + 728, + 242 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Hajime Hosokawa", + "bbox": [ + 635, + 244, + 758, + 256 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Han Liu", + "bbox": [ + 635, + 257, + 692, + 268 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Han-Kai Hsu", + "bbox": [ + 635, + 271, + 723, + 282 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Hann Wang", + "bbox": [ + 635, + 285, + 714, + 297 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Hao Li", + "bbox": [ + 635, + 299, + 683, + 310 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Hao Yang", + "bbox": [ + 635, + 313, + 702, + 325 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Haofeng Zhu", + "bbox": [ + 635, + 327, + 723, + 339 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Haozheng Fan", + "bbox": [ + 635, + 340, + 730, + 353 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Harman Singh", + "bbox": [ + 635, + 354, + 732, + 367 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Harshavardhan Kaluvala", + "bbox": [ + 635, + 368, + 797, + 380 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Hashim Saeed", + "bbox": [ + 635, + 382, + 730, + 393 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "He Xie", + "bbox": [ + 635, + 395, + 684, + 406 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Helian Feng", + "bbox": [ + 635, + 409, + 717, + 421 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Hendrix", + "bbox": [ + 635, + 422, + 694, + 435 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Hengzhi Pei", + "bbox": [ + 635, + 436, + 717, + 449 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Henrik Nielsen", + "bbox": [ + 635, + 450, + 736, + 462 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Hesam Ilati", + "bbox": [ + 635, + 464, + 712, + 476 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Himanshu Patel", + "bbox": [ + 635, + 478, + 741, + 489 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Hongshan Li", + "bbox": [ + 635, + 491, + 722, + 503 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Hongzhou Lin", + "bbox": [ + 635, + 506, + 732, + 518 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Hussain Raza", + "bbox": [ + 635, + 518, + 725, + 531 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Ian Cullinan", + "bbox": [ + 635, + 532, + 718, + 544 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Imre 
Kiss", + "bbox": [ + 635, + 547, + 700, + 558 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Inbarasan Thangamani", + "bbox": [ + 635, + 561, + 785, + 573 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Indrayani Fadnavis", + "bbox": [ + 635, + 575, + 761, + 585 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Ionut Teodor Sorodoc", + "bbox": [ + 635, + 588, + 779, + 599 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Irem Ertuerk", + "bbox": [ + 635, + 602, + 720, + 613 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Iryna Yemialyanava", + "bbox": [ + 635, + 616, + 767, + 628 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Ishan Soni", + "bbox": [ + 635, + 630, + 705, + 641 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Ismail Jelal", + "bbox": [ + 635, + 643, + 710, + 654 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Ivan Tse", + "bbox": [ + 635, + 657, + 694, + 667 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Jack FitzGerald", + "bbox": [ + 635, + 671, + 740, + 681 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Jack Zhao", + "bbox": [ + 635, + 685, + 704, + 696 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Jackson Rothgeb", + "bbox": [ + 635, + 699, + 748, + 710 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Jacky Lee", + "bbox": [ + 635, + 712, + 702, + 724 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Jake Jung", + "bbox": [ + 635, + 726, + 700, + 739 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Jakub Debski", + "bbox": [ + 635, + 739, + 725, + 751 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Jakub Tomczak", + "bbox": [ + 635, + 753, + 738, + 765 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "James Jeun", + "bbox": [ + 635, + 767, + 710, + 777 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "James Sanders", + "bbox": [ + 635, + 781, + 732, + 792 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Jason Crowley", + "bbox": [ + 635, + 795, + 732, + 808 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Jay Lee", + "bbox": [ + 635, + 809, + 687, + 821 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Jayakrishna Anvesh Paidy", + "bbox": [ + 635, + 821, + 807, + 835 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Jayant Tiwari", + "bbox": [ + 635, + 835, + 725, + 849 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Jean Farmer", + "bbox": [ + 635, + 849, + 717, + 861 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Jeff Solinsky", + "bbox": [ + 635, + 864, + 722, + 876 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Jenna Lau", + "bbox": [ + 635, + 878, + 702, + 888 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Jeremy Savareese", + "bbox": [ + 635, + 891, + 753, + 904 + ], + "page_idx": 43 + }, + { + "type": "header", + "text": "The Amazon Nova Family of Models", + "bbox": [ + 370, + 42, + 620, + 56 + ], + "page_idx": 43 + }, + { + "type": "page_number", + "text": "44", + "bbox": [ + 485, + 935, + 504, + 946 + ], + "page_idx": 43 + }, + { + "type": "text", + "text": "Jerzy Zagorski", + "bbox": [ + 112, + 90, + 212, + 104 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Ji Dai", + "bbox": [ + 114, + 106, + 155, + 117 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Jiacheng (JC) Gu", + "bbox": [ + 114, + 119, + 230, + 133 + ], + "page_idx": 44 + }, + { + "type": 
"text", + "text": "Jiahui Li", + "bbox": [ + 114, + 133, + 174, + 145 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Jian (Skyler) Zheng", + "bbox": [ + 114, + 147, + 246, + 161 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Jianhua Lu", + "bbox": [ + 114, + 162, + 189, + 172 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Jianhua Wang", + "bbox": [ + 114, + 175, + 207, + 188 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Jiawei Dai", + "bbox": [ + 114, + 189, + 184, + 200 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Jiawei Mo", + "bbox": [ + 114, + 202, + 184, + 213 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Jiaxi Xu", + "bbox": [ + 114, + 215, + 171, + 227 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Jie Liang", + "bbox": [ + 114, + 229, + 176, + 243 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Jie Yang", + "bbox": [ + 114, + 244, + 173, + 257 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Jim Logan", + "bbox": [ + 114, + 258, + 184, + 270 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Jimit Majmudar", + "bbox": [ + 114, + 271, + 220, + 284 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Jing Liu", + "bbox": [ + 114, + 285, + 169, + 297 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Jinghong Miao", + "bbox": [ + 114, + 299, + 214, + 311 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Jingru Yi", + "bbox": [ + 114, + 313, + 176, + 325 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Jingyang Jin", + "bbox": [ + 114, + 327, + 197, + 339 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Jiun-Yu Kao", + "bbox": [ + 114, + 340, + 197, + 352 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Jixuan Wang", + "bbox": [ + 114, + 354, + 200, + 367 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Jiyang Wang", + "bbox": [ + 114, + 369, + 200, + 381 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Joe Pemberton", + "bbox": [ + 114, + 382, + 212, + 393 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Joel Carlson", + "bbox": [ + 114, + 396, + 196, + 407 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Joey Blundell", + "bbox": [ + 114, + 409, + 205, + 421 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "John Chin-Jew", + "bbox": [ + 114, + 422, + 212, + 435 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "John He", + "bbox": [ + 114, + 436, + 169, + 448 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Jonathan Ho", + "bbox": [ + 114, + 450, + 197, + 463 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Jonathan Hueser", + "bbox": [ + 114, + 465, + 225, + 476 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Jonathan Lunt", + "bbox": [ + 114, + 478, + 209, + 489 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Jooyoung Lee", + "bbox": [ + 114, + 492, + 207, + 503 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Joshua Tan", + "bbox": [ + 114, + 506, + 189, + 518 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Joyjit Chatterjee", + "bbox": [ + 114, + 520, + 223, + 532 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Judith Gaspers", + "bbox": [ + 114, + 534, + 212, + 546 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Jue Wang", + "bbox": [ + 114, + 547, + 179, + 560 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Jun Fang", + "bbox": [ 
+ 114, + 561, + 174, + 574 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Jun Tang", + "bbox": [ + 114, + 575, + 174, + 587 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Jun Wan", + "bbox": [ + 114, + 589, + 173, + 599 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Jun Wu", + "bbox": [ + 114, + 602, + 166, + 613 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Junlei Wang", + "bbox": [ + 114, + 616, + 197, + 628 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Junyi Shi", + "bbox": [ + 114, + 630, + 178, + 642 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Justin Chiu", + "bbox": [ + 114, + 643, + 189, + 655 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Justin Satriano", + "bbox": [ + 114, + 657, + 212, + 669 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Justin Yee", + "bbox": [ + 114, + 671, + 183, + 681 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Jwala Dhamala", + "bbox": [ + 114, + 685, + 215, + 696 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Jyoti Bansal", + "bbox": [ + 114, + 699, + 196, + 710 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Kai Zhen", + "bbox": [ + 114, + 712, + 176, + 724 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Kai-Wei Chang", + "bbox": [ + 114, + 726, + 217, + 739 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Kaixiang Lin", + "bbox": [ + 114, + 739, + 202, + 753 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Kalyan Raman", + "bbox": [ + 114, + 753, + 212, + 766 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Kanthashree Mysore Sathyendra", + "bbox": [ + 114, + 768, + 328, + 780 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Karabo Moroe", + "bbox": [ + 114, + 781, + 212, + 792 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Karan Bhandarkar", + "bbox": [ + 114, + 795, + 236, + 806 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Karan Kothari", + "bbox": [ + 114, + 809, + 209, + 820 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Karolina Owczarzak", + "bbox": [ + 114, + 823, + 251, + 834 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Karthick Gopalswamy", + "bbox": [ + 114, + 835, + 263, + 849 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Karthick Ravi", + "bbox": [ + 114, + 849, + 207, + 861 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Karthik Ramakrishnan", + "bbox": [ + 114, + 864, + 264, + 875 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Karthika Arumugam", + "bbox": [ + 114, + 878, + 251, + 890 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Kartik Mehta", + "bbox": [ + 114, + 891, + 204, + 902 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Katarzyna Konczalska", + "bbox": [ + 375, + 92, + 524, + 104 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Kavya Ravikumar", + "bbox": [ + 375, + 106, + 495, + 118 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Ke Tran", + "bbox": [ + 375, + 119, + 429, + 131 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Kochen Qin", + "bbox": [ + 375, + 133, + 454, + 146 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Kelin Li", + "bbox": [ + 375, + 148, + 431, + 157 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Kelvin Li", + "bbox": [ + 375, + 161, + 439, + 172 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Ketan Kulkarni", + 
"bbox": [ + 375, + 175, + 477, + 186 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Kevin Angelo Rodrigues", + "bbox": [ + 375, + 189, + 539, + 202 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Keyur Patel", + "bbox": [ + 375, + 203, + 452, + 215 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Khadige Abboud", + "bbox": [ + 375, + 215, + 488, + 228 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Kiana Hajebi", + "bbox": [ + 375, + 229, + 462, + 243 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Klaus Reiter", + "bbox": [ + 375, + 244, + 459, + 255 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Kris Schultz", + "bbox": [ + 375, + 257, + 457, + 268 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Krishna Anisetty", + "bbox": [ + 375, + 271, + 488, + 284 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Krishna Kotnana", + "bbox": [ + 375, + 285, + 486, + 297 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Kristen Li", + "bbox": [ + 375, + 299, + 444, + 310 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Kruthi Channamallikarjuna", + "bbox": [ + 375, + 313, + 555, + 325 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Krzysztof Jakubczyk", + "bbox": [ + 375, + 327, + 514, + 339 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Kuba Pierewoj", + "bbox": [ + 375, + 340, + 473, + 353 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Kunal Pal", + "bbox": [ + 375, + 354, + 441, + 364 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Kunwar Srivastav", + "bbox": [ + 375, + 368, + 493, + 378 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Kyle Bannerman", + "bbox": [ + 375, + 382, + 486, + 393 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Lahari Poddar", + "bbox": [ + 375, + 395, + 470, + 407 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Lakshmi Prasad", + "bbox": [ + 375, + 409, + 480, + 421 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Larry Tseng", + "bbox": [ + 375, + 422, + 457, + 435 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Laxmikant Naik", + "bbox": [ + 375, + 436, + 483, + 449 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Leena Chennuru Vankadara", + "bbox": [ + 375, + 450, + 558, + 462 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Lenon Minorics", + "bbox": [ + 375, + 465, + 480, + 476 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Leo Liu", + "bbox": [ + 375, + 478, + 429, + 489 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Leonard Lausen", + "bbox": [ + 375, + 492, + 482, + 503 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Leonardo F. R. 
Ribeiro", + "bbox": [ + 375, + 506, + 527, + 517 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Li Zhang", + "bbox": [ + 375, + 520, + 437, + 531 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Lili Gehorsam", + "bbox": [ + 375, + 534, + 472, + 544 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Ling Qi", + "bbox": [ + 375, + 547, + 428, + 559 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Lisa Bauer", + "bbox": [ + 375, + 561, + 447, + 571 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Lori Knapp", + "bbox": [ + 375, + 575, + 452, + 587 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Lu Zeng", + "bbox": [ + 375, + 590, + 431, + 601 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Lucas Tong", + "bbox": [ + 375, + 603, + 452, + 614 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Lulu Wong", + "bbox": [ + 375, + 617, + 450, + 628 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Luoxin Chen", + "bbox": [ + 375, + 631, + 462, + 641 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Maciej Rudnicki", + "bbox": [ + 375, + 643, + 485, + 656 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Mahdi Namazifar", + "bbox": [ + 375, + 657, + 491, + 669 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Mahesh Jaliminche", + "bbox": [ + 375, + 671, + 503, + 681 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Maira Ladeira Tanke", + "bbox": [ + 375, + 685, + 513, + 696 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Manasi Gupta", + "bbox": [ + 375, + 699, + 468, + 710 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Mandeep Ahlawat", + "bbox": [ + 375, + 712, + 496, + 724 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Mani Khanuja", + "bbox": [ + 375, + 726, + 470, + 738 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Mani Sundaram", + "bbox": [ + 375, + 739, + 480, + 752 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Marcin Leyk", + "bbox": [ + 375, + 753, + 460, + 766 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Mariusz Momotko", + "bbox": [ + 375, + 768, + 500, + 779 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Markus Boese", + "bbox": [ + 375, + 781, + 470, + 792 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Markus Dreyer", + "bbox": [ + 375, + 795, + 475, + 806 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Markus Mueller", + "bbox": [ + 375, + 809, + 483, + 820 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Mason Fu", + "bbox": [ + 375, + 823, + 442, + 834 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Mateusz Górski", + "bbox": [ + 375, + 835, + 480, + 848 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Mateusz Mastalerczyk", + "bbox": [ + 375, + 849, + 524, + 862 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Matias Mora", + "bbox": [ + 375, + 864, + 460, + 875 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Matt Johnson", + "bbox": [ + 375, + 878, + 465, + 888 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Matt Scott", + "bbox": [ + 375, + 891, + 444, + 902 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Matthew Wen", + "bbox": [ + 635, + 92, + 728, + 104 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Max Barysau", + "bbox": [ + 635, + 106, + 723, + 119 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Maya Bouerdassi", + 
"bbox": [ + 635, + 119, + 763, + 132 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Maya Krishnan", + "bbox": [ + 635, + 133, + 736, + 146 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Mayank Gupta", + "bbox": [ + 635, + 148, + 733, + 160 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Mayank Hirani", + "bbox": [ + 635, + 162, + 735, + 174 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Mayank Kulkarni", + "bbox": [ + 635, + 175, + 751, + 186 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Meganathan Narayanasamy", + "bbox": [ + 635, + 189, + 818, + 202 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Melanie Bradford", + "bbox": [ + 635, + 203, + 754, + 214 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Melanie Gens", + "bbox": [ + 635, + 215, + 728, + 227 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Melissa Burke", + "bbox": [ + 635, + 229, + 730, + 241 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Meng Jin", + "bbox": [ + 635, + 244, + 697, + 256 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Miao Chen", + "bbox": [ + 635, + 258, + 709, + 268 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Michael Denkowski", + "bbox": [ + 635, + 271, + 767, + 282 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Michael Heymel", + "bbox": [ + 635, + 285, + 746, + 297 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Michael Krestyaninov", + "bbox": [ + 635, + 299, + 782, + 311 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Michal Obirek", + "bbox": [ + 635, + 313, + 733, + 324 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Michalina Wichorowska", + "bbox": [ + 635, + 327, + 797, + 338 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Michal Miotk", + "bbox": [ + 635, + 340, + 727, + 351 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Milosz Watroba", + "bbox": [ + 635, + 354, + 741, + 364 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Mingyi Hong", + "bbox": [ + 635, + 368, + 725, + 381 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Mingzhi Yu", + "bbox": [ + 635, + 382, + 715, + 393 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Miranda Liu", + "bbox": [ + 635, + 396, + 718, + 407 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Mohamed Gouda", + "bbox": [ + 635, + 409, + 751, + 421 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Mohammad El-Shabani", + "bbox": [ + 635, + 422, + 792, + 435 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Mohammad Ghavamzadeh", + "bbox": [ + 635, + 436, + 812, + 448 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Mohit Bansal", + "bbox": [ + 635, + 450, + 725, + 462 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Morteza Ziyadi", + "bbox": [ + 635, + 465, + 738, + 476 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Nan Xia", + "bbox": [ + 635, + 479, + 692, + 489 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Nathan Susanj", + "bbox": [ + 635, + 492, + 732, + 503 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Nav Bhasin", + "bbox": [ + 635, + 506, + 712, + 517 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Neha Goswami", + "bbox": [ + 635, + 520, + 738, + 531 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Nehal Belgamwar", + "bbox": [ + 635, + 532, + 754, + 545 + ], + "page_idx": 44 + }, + { + "type": 
"text", + "text": "Nicolas Anastassacos", + "bbox": [ + 635, + 547, + 777, + 559 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Nicolas Bergeron", + "bbox": [ + 635, + 561, + 750, + 573 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Nidhi Jain", + "bbox": [ + 635, + 575, + 704, + 585 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Nihal Jain", + "bbox": [ + 635, + 589, + 704, + 599 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Niharika Chopparapu", + "bbox": [ + 635, + 603, + 777, + 614 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Nik Xu", + "bbox": [ + 635, + 617, + 684, + 627 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Nikko Strom", + "bbox": [ + 635, + 630, + 722, + 641 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Nikolaos Malandrakis", + "bbox": [ + 635, + 643, + 781, + 655 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Nimisha Mishra", + "bbox": [ + 635, + 657, + 743, + 667 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Ninad Parkhi", + "bbox": [ + 635, + 671, + 723, + 681 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Ninareh Mehrabi", + "bbox": [ + 635, + 685, + 750, + 696 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Nishita Sant", + "bbox": [ + 635, + 699, + 718, + 709 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Nishtha Gupta", + "bbox": [ + 635, + 712, + 732, + 724 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Nitesh Sekhar", + "bbox": [ + 635, + 726, + 728, + 738 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Nithin Rajeev", + "bbox": [ + 635, + 739, + 728, + 753 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Nithish Raja Chidambaram", + "bbox": [ + 635, + 753, + 815, + 766 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Nitish Dhar", + "bbox": [ + 635, + 768, + 714, + 777 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Noor Bhagwagar", + "bbox": [ + 635, + 781, + 750, + 794 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Noy Konforty", + "bbox": [ + 635, + 795, + 728, + 808 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Omar Babu", + "bbox": [ + 635, + 809, + 712, + 819 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Omid Razavi", + "bbox": [ + 635, + 823, + 723, + 834 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Orchid Majumder", + "bbox": [ + 635, + 835, + 754, + 849 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Osama Dar", + "bbox": [ + 635, + 849, + 710, + 861 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Oscar Hsu", + "bbox": [ + 635, + 864, + 705, + 875 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Pablo Kvitca", + "bbox": [ + 635, + 878, + 722, + 888 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Pallavi Pandey", + "bbox": [ + 635, + 891, + 733, + 904 + ], + "page_idx": 44 + }, + { + "type": "header", + "text": "The Amazon Nova Family of Models", + "bbox": [ + 370, + 42, + 620, + 56 + ], + "page_idx": 44 + }, + { + "type": "page_number", + "text": "45", + "bbox": [ + 485, + 935, + 504, + 946 + ], + "page_idx": 44 + }, + { + "type": "text", + "text": "Parker Seegmiller", + "bbox": [ + 112, + 90, + 235, + 104 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Patrick Lange", + "bbox": [ + 114, + 106, + 207, + 119 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Paul Ferraro", + "bbox": [ + 114, + 119, + 199, + 132 + ], + 
"page_idx": 45 + }, + { + "type": "text", + "text": "Payal Motwani", + "bbox": [ + 114, + 133, + 215, + 146 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Pegah Kharazmi", + "bbox": [ + 114, + 147, + 225, + 160 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Pei Wang", + "bbox": [ + 114, + 161, + 179, + 174 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Pengfei Liu", + "bbox": [ + 114, + 175, + 194, + 186 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Peter Bradtke", + "bbox": [ + 114, + 189, + 205, + 200 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Peter Gotoz", + "bbox": [ + 114, + 202, + 186, + 214 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Peter Zhou", + "bbox": [ + 114, + 215, + 189, + 227 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Pichao Wang", + "bbox": [ + 114, + 229, + 202, + 243 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Piotr Poskart", + "bbox": [ + 114, + 244, + 202, + 256 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Pooja Sonawane", + "bbox": [ + 114, + 257, + 225, + 270 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Pradeep Natarajan", + "bbox": [ + 114, + 271, + 236, + 284 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Pradyun Ramadorai", + "bbox": [ + 114, + 285, + 246, + 297 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Pralam Shah", + "bbox": [ + 114, + 299, + 200, + 311 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Prasad Nirantar", + "bbox": [ + 114, + 313, + 218, + 324 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Prasanthi Chavali", + "bbox": [ + 114, + 325, + 232, + 338 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Prashan Wanigasekara", + "bbox": [ + 114, + 340, + 264, + 353 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Prashant Saraf", + "bbox": [ + 114, + 354, + 212, + 364 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Prashun Dey", + "bbox": [ + 114, + 367, + 200, + 381 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Pratyush Pant", + "bbox": [ + 114, + 382, + 207, + 393 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Prerak Pradhan", + "bbox": [ + 114, + 395, + 215, + 407 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Preyaa Patel", + "bbox": [ + 114, + 409, + 197, + 422 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Priyanka Dadlani", + "bbox": [ + 114, + 422, + 230, + 435 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Prudhvee Narasimha Sadha", + "bbox": [ + 114, + 436, + 297, + 448 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Qi Dong", + "bbox": [ + 114, + 450, + 173, + 463 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Qian Hu", + "bbox": [ + 114, + 465, + 173, + 476 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Qiaozi (QZ) Gao", + "bbox": [ + 114, + 478, + 228, + 489 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Qing Liu", + "bbox": [ + 114, + 492, + 176, + 505 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Quinn Lam", + "bbox": [ + 114, + 506, + 191, + 518 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Quynh Do", + "bbox": [ + 114, + 520, + 184, + 531 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "R. 
Manmatha", + "bbox": [ + 114, + 532, + 205, + 544 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Rachel Willis", + "bbox": [ + 114, + 546, + 205, + 558 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Rafael Liu", + "bbox": [ + 114, + 561, + 186, + 571 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Rafal Ellert", + "bbox": [ + 114, + 574, + 192, + 585 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Rafal Kalinski", + "bbox": [ + 114, + 588, + 210, + 599 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Rafi Al Attrach", + "bbox": [ + 114, + 602, + 217, + 614 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Ragha Prasad", + "bbox": [ + 114, + 616, + 205, + 628 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Ragini Prasad", + "bbox": [ + 114, + 628, + 207, + 642 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Raguvir Kunani", + "bbox": [ + 114, + 643, + 220, + 656 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Rahul Gupta", + "bbox": [ + 114, + 657, + 200, + 670 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Rahul Sharma", + "bbox": [ + 114, + 672, + 209, + 681 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Rahul Tewari", + "bbox": [ + 114, + 684, + 204, + 696 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Rajaganesh Baskaran", + "bbox": [ + 114, + 698, + 256, + 710 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Rajan Singh", + "bbox": [ + 114, + 712, + 197, + 724 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Rajiv Gupta", + "bbox": [ + 114, + 726, + 196, + 739 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Rajiv Reddy", + "bbox": [ + 114, + 739, + 197, + 753 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Rajshekhar Das", + "bbox": [ + 114, + 753, + 218, + 766 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Rakesh Chada", + "bbox": [ + 114, + 767, + 210, + 779 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Rakesh Vaideeswaran Mahesh", + "bbox": [ + 114, + 781, + 313, + 792 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Ram Chandrasekaran", + "bbox": [ + 114, + 794, + 256, + 806 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Ramesh Nallapati", + "bbox": [ + 114, + 809, + 233, + 821 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Ran Xue", + "bbox": [ + 114, + 823, + 174, + 834 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Rashmi Gangadharaiah", + "bbox": [ + 114, + 835, + 269, + 849 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Ravi Rachakonda", + "bbox": [ + 114, + 849, + 232, + 862 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Renxian Zhang", + "bbox": [ + 114, + 864, + 217, + 877 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Rexhina Blloshmi", + "bbox": [ + 114, + 878, + 233, + 888 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Rishabh Agrawal", + "bbox": [ + 114, + 891, + 230, + 904 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Robert Enyedi", + "bbox": [ + 375, + 90, + 472, + 104 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Robert Lowe", + "bbox": [ + 375, + 106, + 462, + 118 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Robik Shrestha", + "bbox": [ + 375, + 119, + 477, + 131 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Robinson Piramuthu", + "bbox": [ + 375, + 133, + 511, + 145 + ], + "page_idx": 45 + }, 
+ { + "type": "text", + "text": "Rohail Asad", + "bbox": [ + 375, + 147, + 457, + 159 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Rohan Khanna", + "bbox": [ + 375, + 161, + 475, + 172 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Rohan Mukherjee", + "bbox": [ + 375, + 175, + 493, + 188 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Rohit Mittal", + "bbox": [ + 375, + 189, + 457, + 200 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Rohit Prasad", + "bbox": [ + 375, + 202, + 460, + 214 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Rohith Mysore Vijaya Kumar", + "bbox": [ + 375, + 215, + 571, + 229 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Ron Diamant", + "bbox": [ + 375, + 229, + 465, + 242 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Ruchita Gupta", + "bbox": [ + 375, + 244, + 472, + 256 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Ruiwen Li", + "bbox": [ + 375, + 257, + 446, + 268 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Ruoying Li", + "bbox": [ + 375, + 271, + 450, + 284 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "RushabhFegade", + "bbox": [ + 375, + 285, + 485, + 297 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Ruxu Zhang", + "bbox": [ + 375, + 299, + 457, + 311 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Ryan Arbow", + "bbox": [ + 375, + 313, + 460, + 325 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Ryan Chen", + "bbox": [ + 375, + 327, + 450, + 338 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Ryan Gabbard", + "bbox": [ + 375, + 340, + 472, + 352 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Ryan Hoium", + "bbox": [ + 375, + 354, + 460, + 364 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Ryan King", + "bbox": [ + 375, + 368, + 447, + 380 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Sabarishkumar Iyer", + "bbox": [ + 375, + 382, + 504, + 393 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Sachal Malick", + "bbox": [ + 375, + 395, + 470, + 407 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Sahar Movaghati", + "bbox": [ + 375, + 409, + 488, + 421 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Sai Balakavi", + "bbox": [ + 375, + 422, + 460, + 434 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Sai Jakka", + "bbox": [ + 375, + 436, + 439, + 448 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Sai Kashyap Paruvelli", + "bbox": [ + 375, + 450, + 521, + 463 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Sai Muralidhar Jayanthi", + "bbox": [ + 375, + 464, + 532, + 477 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Saicharan Shriram Mujumdar", + "bbox": [ + 375, + 478, + 571, + 489 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Sainyam Kapoor", + "bbox": [ + 375, + 492, + 486, + 503 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Sajjad Beygi", + "bbox": [ + 375, + 506, + 460, + 518 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Saket Dingliwal", + "bbox": [ + 375, + 520, + 482, + 532 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Saleh Soltan", + "bbox": [ + 375, + 534, + 459, + 544 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Sam Ricklin", + "bbox": [ + 375, + 547, + 457, + 558 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Sam Tucker", + "bbox": [ + 375, + 561, + 455, + 571 + ], 
+ "page_idx": 45 + }, + { + "type": "text", + "text": "Sameer Sinha", + "bbox": [ + 375, + 574, + 467, + 585 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Samridhi Choudhary", + "bbox": [ + 375, + 588, + 513, + 601 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Samson Tan", + "bbox": [ + 375, + 603, + 457, + 613 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Samuel Broscheit", + "bbox": [ + 375, + 616, + 493, + 627 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Samuel Schulter", + "bbox": [ + 375, + 628, + 485, + 641 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Sanchit Agarwal", + "bbox": [ + 375, + 643, + 486, + 656 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Sandeep Atluri", + "bbox": [ + 375, + 657, + 473, + 670 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Sander Valstar", + "bbox": [ + 375, + 672, + 472, + 681 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Sanjana Shankar", + "bbox": [ + 375, + 685, + 486, + 696 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Sanyukta Sanyukta", + "bbox": [ + 375, + 699, + 501, + 710 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Sarthak Khanna", + "bbox": [ + 375, + 712, + 482, + 724 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Sarvpriye Khetrapal", + "bbox": [ + 375, + 726, + 508, + 739 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Satish Janakiraman", + "bbox": [ + 375, + 739, + 503, + 752 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Saumil Shah", + "bbox": [ + 375, + 753, + 460, + 765 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Saurabh Akolkar", + "bbox": [ + 375, + 767, + 488, + 779 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Saurabh Giri", + "bbox": [ + 375, + 781, + 460, + 792 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Saurabh Khandelwal", + "bbox": [ + 375, + 794, + 513, + 806 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Saurabh Pawar", + "bbox": [ + 375, + 809, + 475, + 820 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Saurabh Sahu", + "bbox": [ + 375, + 823, + 467, + 834 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Sean Huang", + "bbox": [ + 375, + 835, + 455, + 849 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Sejun Ra", + "bbox": [ + 375, + 849, + 436, + 862 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Senthilkumar Gopal", + "bbox": [ + 375, + 864, + 508, + 877 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Sergei Dobroshinsky", + "bbox": [ + 375, + 878, + 513, + 890 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Shadi Saba", + "bbox": [ + 375, + 891, + 450, + 902 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Shamik Roy", + "bbox": [ + 635, + 90, + 718, + 104 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Shamit Lal", + "bbox": [ + 635, + 106, + 709, + 117 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Shankar Ananthakrishnan", + "bbox": [ + 635, + 119, + 805, + 131 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Sharon Li", + "bbox": [ + 635, + 133, + 702, + 145 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Shashwat Srijan", + "bbox": [ + 635, + 147, + 741, + 160 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Shekhar Bhide", + "bbox": [ + 635, + 161, + 733, + 172 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Sheng Long Tang", 
+ "bbox": [ + 635, + 175, + 753, + 188 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Sheng Zha", + "bbox": [ + 635, + 189, + 707, + 200 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Sheree Oraby", + "bbox": [ + 635, + 203, + 733, + 215 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Sherif Mostafa", + "bbox": [ + 635, + 215, + 733, + 228 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Shiqi Li", + "bbox": [ + 635, + 229, + 691, + 242 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Shishir Bharathi", + "bbox": [ + 635, + 244, + 743, + 255 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "ShivamPrakash", + "bbox": [ + 635, + 257, + 741, + 268 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Shiyuan Huang", + "bbox": [ + 635, + 271, + 738, + 284 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Shreya Yembarwar", + "bbox": [ + 635, + 285, + 761, + 297 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Shreyas Pansare", + "bbox": [ + 635, + 299, + 743, + 311 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Shreyas Subramanian", + "bbox": [ + 635, + 313, + 777, + 325 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Shrijeet Joshi", + "bbox": [ + 635, + 327, + 725, + 339 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Shuai Liu", + "bbox": [ + 635, + 340, + 700, + 352 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Shuai Tang", + "bbox": [ + 635, + 354, + 710, + 366 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Shubham Chandak", + "bbox": [ + 635, + 368, + 761, + 380 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Shubham Garg", + "bbox": [ + 635, + 382, + 735, + 393 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Shubham Katiyar", + "bbox": [ + 635, + 395, + 751, + 407 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Shubham Mehta", + "bbox": [ + 635, + 409, + 745, + 421 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Shubham Srivastav", + "bbox": [ + 635, + 422, + 761, + 435 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Shuo Yang", + "bbox": [ + 635, + 436, + 709, + 449 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Siddalingesha D S", + "bbox": [ + 635, + 450, + 758, + 463 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Siddharth Choudhary", + "bbox": [ + 635, + 465, + 777, + 477 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Siddharth Singh Senger", + "bbox": [ + 635, + 478, + 792, + 489 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Simon Babb", + "bbox": [ + 635, + 492, + 718, + 503 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Sina Moeini", + "bbox": [ + 635, + 506, + 717, + 518 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Siqi Deng", + "bbox": [ + 635, + 520, + 702, + 532 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Siva Loganathan", + "bbox": [ + 635, + 534, + 746, + 545 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Slawomir Domagala", + "bbox": [ + 635, + 547, + 771, + 560 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Sneha Narkar", + "bbox": [ + 635, + 561, + 725, + 571 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Sneha Wadhwa", + "bbox": [ + 635, + 574, + 738, + 585 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Songyang Zhang", + "bbox": [ + 635, + 589, + 748, + 602 + ], + "page_idx": 45 + }, + { + "type": "text", 
+ "text": "Songyao Jiang", + "bbox": [ + 635, + 603, + 733, + 614 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Sony Trenous", + "bbox": [ + 635, + 616, + 728, + 628 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Soumajyoti Sarkar", + "bbox": [ + 635, + 630, + 759, + 642 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Soumya Saha", + "bbox": [ + 635, + 643, + 725, + 656 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Sourabh Reddy", + "bbox": [ + 635, + 657, + 738, + 670 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Sourav Dokania", + "bbox": [ + 635, + 671, + 743, + 681 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Spurthideepika Sandiri", + "bbox": [ + 635, + 685, + 787, + 698 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Spyros Matsoukas", + "bbox": [ + 635, + 699, + 756, + 710 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Sravan Bodapati", + "bbox": [ + 635, + 712, + 745, + 724 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Sri Harsha Reddy Wdaru", + "bbox": [ + 635, + 726, + 800, + 739 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Sridevi Yagati Venkateshdatta", + "bbox": [ + 635, + 739, + 831, + 752 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Srikanth Ronanki", + "bbox": [ + 635, + 753, + 751, + 765 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Srinivasan R Veeravanallur", + "bbox": [ + 635, + 767, + 813, + 779 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Sriram Venkatapathy", + "bbox": [ + 635, + 781, + 774, + 794 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Sriramprabhu Sankaraguru", + "bbox": [ + 635, + 795, + 813, + 808 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Sruthi Gorantla", + "bbox": [ + 635, + 809, + 738, + 820 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Sruthi Karuturi", + "bbox": [ + 635, + 823, + 735, + 834 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Stefan Schroedl", + "bbox": [ + 635, + 835, + 741, + 847 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Subendhu Rongali", + "bbox": [ + 635, + 849, + 758, + 863 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Subbasis Kundu", + "bbox": [ + 635, + 864, + 743, + 875 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Suhaila Shakiah", + "bbox": [ + 635, + 878, + 743, + 888 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Sukriti Tiwari", + "bbox": [ + 635, + 891, + 728, + 902 + ], + "page_idx": 45 + }, + { + "type": "header", + "text": "The Amazon Nova Family of Models", + "bbox": [ + 370, + 42, + 620, + 56 + ], + "page_idx": 45 + }, + { + "type": "page_number", + "text": "46", + "bbox": [ + 485, + 936, + 504, + 946 + ], + "page_idx": 45 + }, + { + "type": "text", + "text": "Sumit Bharti", + "bbox": [ + 112, + 90, + 202, + 104 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Sumita Sami", + "bbox": [ + 114, + 106, + 202, + 118 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Sumith Mathew", + "bbox": [ + 114, + 119, + 220, + 132 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Sunny Yu", + "bbox": [ + 114, + 133, + 181, + 146 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Sunwoo Kim", + "bbox": [ + 114, + 147, + 204, + 159 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Suraj Bajirao Malode", + "bbox": [ + 114, + 161, + 258, + 174 + ], + "page_idx": 46 + }, + { + "type": "text", + 
"text": "Susana Cumplido Riel", + "bbox": [ + 114, + 175, + 264, + 188 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Swapnil Palod", + "bbox": [ + 114, + 188, + 210, + 200 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Swastik Roy", + "bbox": [ + 114, + 202, + 199, + 215 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Syed Furqhan", + "bbox": [ + 114, + 215, + 207, + 229 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Tagyoung Chung", + "bbox": [ + 114, + 229, + 228, + 243 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Takuma Yoshitani", + "bbox": [ + 114, + 244, + 233, + 256 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Taojiannan Yang", + "bbox": [ + 114, + 257, + 225, + 271 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Tejaswi Chillakura", + "bbox": [ + 114, + 272, + 238, + 284 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Tejwant Bajwa", + "bbox": [ + 114, + 285, + 212, + 297 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Temi Lajumoke", + "bbox": [ + 114, + 299, + 218, + 311 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Thanh Tran", + "bbox": [ + 114, + 313, + 191, + 325 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Thomas Gueudre", + "bbox": [ + 114, + 327, + 228, + 338 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Thomas Jung", + "bbox": [ + 114, + 340, + 204, + 353 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Tianhui Li", + "bbox": [ + 114, + 354, + 184, + 364 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Tim Seemman", + "bbox": [ + 114, + 368, + 210, + 378 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Timothy Leffel", + "bbox": [ + 114, + 381, + 214, + 393 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Tingting Xiang", + "bbox": [ + 114, + 395, + 215, + 407 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Tirth Patel", + "bbox": [ + 114, + 409, + 184, + 421 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Tobias Domhan", + "bbox": [ + 114, + 422, + 218, + 434 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Tobias Falke", + "bbox": [ + 114, + 436, + 199, + 448 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Toby Guo", + "bbox": [ + 114, + 450, + 181, + 463 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Tom Li", + "bbox": [ + 114, + 465, + 165, + 476 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Tomasz Horsczaruk", + "bbox": [ + 114, + 478, + 253, + 489 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Tomasz Jedynak", + "bbox": [ + 114, + 491, + 223, + 503 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Tushar Kulkarni", + "bbox": [ + 114, + 505, + 222, + 518 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Tyst Marin", + "bbox": [ + 114, + 518, + 187, + 531 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Tytus Metrycki", + "bbox": [ + 114, + 532, + 215, + 546 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Tzu-Yen Wang", + "bbox": [ + 114, + 547, + 214, + 560 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Umang Jain", + "bbox": [ + 114, + 561, + 194, + 574 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Upendra Singh", + "bbox": [ + 114, + 575, + 214, + 587 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Utkarsh Chirimar", + "bbox": [ + 114, + 588, + 230, + 599 + ], + "page_idx": 46 + }, + { + "type": 
"text", + "text": "Vaibhav Gupta", + "bbox": [ + 114, + 601, + 214, + 614 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Vanshil Shah", + "bbox": [ + 114, + 616, + 200, + 627 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Varad Deshpande", + "bbox": [ + 114, + 628, + 230, + 642 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Varad Gunjal", + "bbox": [ + 114, + 643, + 202, + 656 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Varsha Srikeshava", + "bbox": [ + 114, + 657, + 235, + 669 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Varsha Vivek", + "bbox": [ + 114, + 670, + 202, + 681 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Varun Bharadwaj", + "bbox": [ + 114, + 684, + 228, + 698 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Varun Gangal", + "bbox": [ + 114, + 699, + 205, + 710 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Varun Kumar", + "bbox": [ + 114, + 712, + 204, + 724 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Venkatesh Elango", + "bbox": [ + 114, + 726, + 233, + 739 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Vicente Ordonez", + "bbox": [ + 114, + 739, + 225, + 751 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Victor Soto", + "bbox": [ + 114, + 753, + 191, + 763 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Vignesh Radhakrishnan", + "bbox": [ + 374, + 90, + 532, + 104 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Vihang Patel", + "bbox": [ + 374, + 106, + 460, + 118 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Vikram Singh", + "bbox": [ + 374, + 119, + 467, + 133 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Vinay Varma Kolanuvada", + "bbox": [ + 374, + 133, + 544, + 146 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Vinayshekhar Bannihatti Kumar", + "bbox": [ + 374, + 147, + 586, + 160 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Vincent Auvray", + "bbox": [ + 374, + 161, + 480, + 174 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Vincent Cartillier", + "bbox": [ + 374, + 175, + 491, + 186 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Vincent Ponzo", + "bbox": [ + 374, + 188, + 473, + 200 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Violet Peng", + "bbox": [ + 374, + 202, + 452, + 215 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Vishal Khandelwal", + "bbox": [ + 374, + 215, + 501, + 228 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Vishal Naik", + "bbox": [ + 374, + 229, + 454, + 241 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Vishvesh Sahasrabudhe", + "bbox": [ + 374, + 243, + 529, + 255 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Vitaliy Korolev", + "bbox": [ + 374, + 257, + 477, + 270 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Vivek Gokuladas", + "bbox": [ + 374, + 271, + 488, + 282 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Vivek Madan", + "bbox": [ + 374, + 285, + 464, + 297 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Vivek Subramanian", + "bbox": [ + 374, + 299, + 504, + 310 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Volkan Cevher", + "bbox": [ + 374, + 311, + 473, + 325 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Vrinda Gupta", + "bbox": [ + 374, + 325, + 465, + 339 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Wael Hamza", + "bbox": [ + 374, 
+ 340, + 460, + 352 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Wei Zhang", + "bbox": [ + 374, + 354, + 447, + 366 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Weitong Ruan", + "bbox": [ + 374, + 368, + 470, + 380 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Weiwei Cheng", + "bbox": [ + 374, + 381, + 473, + 393 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Wen Zhang", + "bbox": [ + 374, + 395, + 452, + 407 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Wenbo Zhao", + "bbox": [ + 374, + 409, + 459, + 421 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Wenyan Yao", + "bbox": [ + 374, + 422, + 459, + 435 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Wenzhuo Ouyang", + "bbox": [ + 374, + 436, + 493, + 450 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Wesley Dashner", + "bbox": [ + 374, + 450, + 482, + 463 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "William Campbell", + "bbox": [ + 374, + 464, + 496, + 477 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "William Lin", + "bbox": [ + 374, + 478, + 455, + 489 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Willian Martin", + "bbox": [ + 374, + 491, + 473, + 503 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Wyatt Pearson", + "bbox": [ + 374, + 505, + 468, + 518 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Xiang Jiang", + "bbox": [ + 374, + 518, + 455, + 532 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Xiangxing Lu", + "bbox": [ + 374, + 532, + 468, + 546 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Xiangyang Shi", + "bbox": [ + 374, + 547, + 473, + 560 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Xianwen Peng", + "bbox": [ + 374, + 561, + 470, + 574 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Xiaofeng Gao", + "bbox": [ + 374, + 575, + 468, + 587 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Xiaoge Jiang", + "bbox": [ + 374, + 588, + 462, + 601 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Xiaohan Fei", + "bbox": [ + 374, + 602, + 457, + 614 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Xiaohui Wang", + "bbox": [ + 374, + 616, + 470, + 628 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Xiaozhou Joey Zhou", + "bbox": [ + 374, + 628, + 511, + 642 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Xin Feng", + "bbox": [ + 374, + 643, + 437, + 656 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Xinyan Zhao", + "bbox": [ + 374, + 657, + 460, + 670 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Xinyao Wang", + "bbox": [ + 374, + 671, + 465, + 684 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Xinyu Li", + "bbox": [ + 374, + 685, + 434, + 698 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Xu Zhang", + "bbox": [ + 374, + 699, + 442, + 712 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Xuan Wang", + "bbox": [ + 374, + 713, + 452, + 724 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Xuandi Fu", + "bbox": [ + 374, + 726, + 446, + 738 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Xueling Yuan", + "bbox": [ + 374, + 739, + 467, + 753 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Xuning Wang", + "bbox": [ + 374, + 753, + 467, + 767 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Yadunandana Rao", + "bbox": [ + 633, + 90, + 754, + 104 + ], + 
"page_idx": 46 + }, + { + "type": "text", + "text": "Yair Tavizon", + "bbox": [ + 633, + 106, + 720, + 118 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Yan Rossiytsev", + "bbox": [ + 633, + 119, + 736, + 133 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Yanbei Chen", + "bbox": [ + 633, + 133, + 720, + 145 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Yang Liu", + "bbox": [ + 633, + 147, + 697, + 160 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Yang Zou", + "bbox": [ + 633, + 161, + 700, + 174 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Yangsook Park", + "bbox": [ + 633, + 175, + 733, + 186 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Yannick Versley", + "bbox": [ + 633, + 188, + 743, + 202 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Yanyan Zhang", + "bbox": [ + 633, + 203, + 730, + 215 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Yash Patel", + "bbox": [ + 633, + 215, + 705, + 228 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Yen-Cheng Lu", + "bbox": [ + 633, + 229, + 732, + 243 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Yi Pan", + "bbox": [ + 633, + 244, + 679, + 255 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Yi-Hsiang (Sean) Lai", + "bbox": [ + 633, + 257, + 774, + 270 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Yichen Hu", + "bbox": [ + 633, + 271, + 707, + 282 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Yida Wang", + "bbox": [ + 633, + 285, + 709, + 297 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Yiheng Zhou", + "bbox": [ + 633, + 299, + 722, + 311 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Yilin Xiang", + "bbox": [ + 633, + 313, + 712, + 325 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Ying Shi", + "bbox": [ + 633, + 327, + 694, + 339 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Ying Wang", + "bbox": [ + 633, + 340, + 709, + 353 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Yishai Galatzer", + "bbox": [ + 633, + 354, + 736, + 366 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Yongxin Wang", + "bbox": [ + 633, + 368, + 733, + 381 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Yorick Shen", + "bbox": [ + 633, + 382, + 715, + 393 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Yuchen Sun", + "bbox": [ + 633, + 395, + 714, + 407 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Yudi Purwatama", + "bbox": [ + 633, + 409, + 745, + 421 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Yue (Rex) Wu", + "bbox": [ + 633, + 422, + 730, + 435 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Yue Gu", + "bbox": [ + 633, + 436, + 684, + 448 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Yuechun Wang", + "bbox": [ + 633, + 450, + 735, + 464 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Yujun Zeng", + "bbox": [ + 633, + 465, + 712, + 478 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Yuncong Chen", + "bbox": [ + 633, + 479, + 733, + 489 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Yunke Zhou", + "bbox": [ + 633, + 491, + 717, + 503 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Yusheng Xie", + "bbox": [ + 633, + 505, + 720, + 518 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Yvon Guy", + "bbox": [ + 633, + 518, + 702, + 532 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": 
"Zbigniew Ambrozinski", + "bbox": [ + 633, + 532, + 787, + 546 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Zhaowei Cai", + "bbox": [ + 633, + 547, + 720, + 559 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Zhen Zhang", + "bbox": [ + 633, + 561, + 717, + 574 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Zheng Wang", + "bbox": [ + 633, + 575, + 720, + 588 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Zhenghui Jin", + "bbox": [ + 633, + 589, + 723, + 601 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Zhewei Zhao", + "bbox": [ + 633, + 602, + 723, + 614 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Zhiheng Li", + "bbox": [ + 633, + 616, + 710, + 628 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Zhiheng Luo", + "bbox": [ + 633, + 628, + 722, + 642 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Zhikang Zhang", + "bbox": [ + 633, + 643, + 736, + 657 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Zhilin Fang", + "bbox": [ + 633, + 657, + 714, + 670 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Zhiqi Bu", + "bbox": [ + 633, + 671, + 696, + 683 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Zhiyuan Wang", + "bbox": [ + 633, + 685, + 733, + 698 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Zhizhong Li", + "bbox": [ + 633, + 699, + 718, + 710 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Zijian Wang", + "bbox": [ + 633, + 712, + 718, + 724 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Zimeng (Chris) Qiu", + "bbox": [ + 633, + 726, + 767, + 739 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Zishi Li", + "bbox": [ + 633, + 739, + 689, + 751 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "D.2 Acknowledgements", + "text_level": 1, + "bbox": [ + 112, + 787, + 292, + 803 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "We would like to acknowledge the following individuals who supported the development of the Nova models and services during the Nova program.", + "bbox": [ + 111, + 811, + 883, + 842 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Abdelrahman Badawy", + "bbox": [ + 112, + 856, + 263, + 869 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Abtin Rasoulian", + "bbox": [ + 114, + 871, + 223, + 883 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Adam Baranowski", + "bbox": [ + 114, + 883, + 238, + 897 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Aishwarya Kore", + "bbox": [ + 374, + 856, + 483, + 869 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Aishwarya Padmakumar", + "bbox": [ + 374, + 871, + 537, + 883 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Alain Krok", + "bbox": [ + 374, + 885, + 450, + 896 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Alex Mould", + "bbox": [ + 633, + 856, + 717, + 869 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Alex Sun", + "bbox": [ + 633, + 871, + 697, + 883 + ], + "page_idx": 46 + }, + { + "type": "text", + "text": "Alexandros Papangelis", + "bbox": [ + 633, + 883, + 785, + 898 + ], + "page_idx": 46 + }, + { + "type": "header", + "text": "The Amazon Nova Family of Models", + "bbox": [ + 370, + 42, + 620, + 56 + ], + "page_idx": 46 + }, + { + "type": "page_number", + "text": "47", + "bbox": [ + 485, + 935, + 504, + 946 + ], + "page_idx": 46 + }, + { + "type": "table", + "img_path": 
"images/104d1d3b30677a61aa977e3bfb470ed7db16f34aecc77a8a5a3e3f519f29ea89.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Alfred Shen
Amaran Asokkumar
Amiya Chakraborty
Anastasios Alexandridis
Angeliki Metallinou
Anila Joshi
Anup Katariya
Arda Keskiner
Avinash Venkatagiri
Aya Elzoheiry
Baishali Chaudhury
Ben Friebe
Bigad Soleiman
Bob Li
Brad Porter
Brian Chou
Brian Yost
Burak Gozluklu
Chad Connally
Chris Azer
Chris Beauchene
Chris Greenwood
Chris Johnson
Clay Cheng
Craig Rowland
Di Jin
Di Wu
Diego Socolinsky
Don Kretsch
Dylan Martin
Emma Lister
Eva Lasarcyk
Evan Kravitz
Federico D'Alessio
Flora Wang
Francisco Calderon Rodriguez
Gamaleldin Elsayed
Gaurav Rele
Gaurav Sukhatme
Gourav Datta
Hadrien Glaude
Hanbo Wang
Hans Hoeijmaker
Haotian An
Harpreet Cheema
Harshit Pande
Hongbin Zheng
Huda Khayrallah
", + "bbox": [ + 112, + 90, + 321, + 753 + ], + "page_idx": 47 + }, + { + "type": "table", + "img_path": "images/f7dc2c775b272d735f1b4017fbebd6ba3845352c29ef471fa5da43d37bbacaae.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Isaac Privitera
Jacob Zhiyuan Fang
Jady Liu
Jae Oh Woo
Jamal Saboune
James Park
Jianbo Yuan
Jianwei Feng
Jie Li
Jinwoo Park
Johan Esbjourner
Jonathan Makunga
JoonHyung Kim
Jorge Beltran
Jose Garrido Ramas
Julie Baca
Justin Lewis
Kamran Razi
Kangyan Liu
Kasana Mahesh
Kelvin Qian
Kyle Goehner
Kyle Saggar
Laith Al-Saadoon
Lei Sun
Lily Liao
Long Chen
Lukacs Ablonczy
Luke Luneau
Maciej Eichler
Mallory McManamo
Manju Arakere
Matt McCoy
Matthew Chang
Meghal Varia
Meghana Ashok
Melanie Li
Mifu Suzuki
Negin Sokhandan
Nick Biso
Nico Bishop
Nicolle Borges
Palash Goyal
Parker Coleman
Paul Sumarokov
Pavel Kveton
Philipp Lerche
Pratibha Kumari
", + "bbox": [ + 374, + 90, + 513, + 752 + ], + "page_idx": 47 + }, + { + "type": "table", + "img_path": "images/45df84ea256037b222c86788c17751b23921927fe22cd2beade0beab39a07079.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Rahul Agarwal
Rahul Ghosh
Rahul Kulkarni
Raj Kumar
Ramana Keerthi
Rams Sundaram
Raymond Fang
Reethika Kesani
Ryan Razkenari
Sarath Krishnan
Scott Patten
Seokhwan Kim
Sepehr Eghbali
Sergey Pugachev
Sertan Alkan
Shailav Taneja
Sheamus Punch
Shikib Mehri
Shilpa Singh
Shraddha Ravishankar
Sijia Liu
Sitanshu Gupta
Sol Vesdapunt
Spencer Romo
Sravya Uppu
Srivani Kambhampati
Stephanie Xie
Sujitha Martin
Sungjin Lee
Sungmin Hong
Tanner McRae
Thomas Patterson
Tina Li
Tom Liang
Trong Nguyen
Vasudev Mahesh Purandare
Vidya Sagar Ravipati
Vu San Ha Huynh
Weijuan Wu
Xiaolong Li
Xinyi Xu
Yaroslav Nechaev
Yuan Tian
Yunfei Bai
Zach Hille
Ziyan Tian
", + "bbox": [ + 633, + 90, + 821, + 726 + ], + "page_idx": 47 + }, + { + "type": "header", + "text": "The Amazon Nova Family of Models", + "bbox": [ + 370, + 42, + 620, + 56 + ], + "page_idx": 47 + }, + { + "type": "page_number", + "text": "48", + "bbox": [ + 485, + 935, + 504, + 946 + ], + "page_idx": 47 + } +] \ No newline at end of file diff --git a/data/2025/2506_12xxx/2506.12103/2704209e-62a6-4e6a-a67a-5d895054a41f_model.json b/data/2025/2506_12xxx/2506.12103/2704209e-62a6-4e6a-a67a-5d895054a41f_model.json new file mode 100644 index 0000000000000000000000000000000000000000..a67e97b6f4f0731b650dc4d3bc47b5422bec9dd4 --- /dev/null +++ b/data/2025/2506_12xxx/2506.12103/2704209e-62a6-4e6a-a67a-5d895054a41f_model.json @@ -0,0 +1,16433 @@ +[ + [ + { + "type": "title", + "bbox": [ + 0.275, + 0.123, + 0.728, + 0.175 + ], + "angle": 0, + "content": "The Amazon Nova Family of Models: Technical Report and Model Card" + }, + { + "type": "image_caption", + "bbox": [ + 0.362, + 0.226, + 0.637, + 0.242 + ], + "angle": 0, + "content": "Amazon Artificial General Intelligence" + }, + { + "type": "image", + "bbox": [ + 0.113, + 0.279, + 0.468, + 0.584 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.534, + 0.346, + 0.885, + 0.43 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.534, + 0.499, + 0.885, + 0.584 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.344, + 0.596, + 0.655, + 0.612 + ], + "angle": 0, + "content": "Figure 1: The Amazon Nova family of models" + }, + { + "type": "title", + "bbox": [ + 0.46, + 0.663, + 0.54, + 0.679 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.694, + 0.828, + 0.848 + ], + "angle": 0, + "content": "We present Amazon Nova, a new generation of state-of-the-art foundation models that deliver frontier intelligence and industry-leading price performance. Amazon Nova Pro is a highly-capable multimodal model with the best combination of accuracy, speed, and cost for a wide range of tasks. Amazon Nova Lite is a low-cost multimodal model that is lightning fast for processing images, video, documents and text. Amazon Nova Micro is a text-only model that delivers our lowest-latency responses at very low cost. Amazon Nova Canvas is an image generation model that creates professional grade images with rich customization controls. Amazon Nova Reel is a video generation model offering high-quality outputs, customization, and motion control. Our models were built responsibly and with a commitment to customer trust, security, and reliability. We report benchmarking results for core capabilities, agentic performance, long context, functional adaptation, runtime performance, and human evaluation." 
+ }, + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.268, + 0.059, + 0.708 + ], + "angle": 270, + "content": "arXiv:2506.12103v1 [cs.AI] 17 Mar 2025" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.372, + 0.044, + 0.621, + 0.058 + ], + "angle": 0, + "content": "The Amazon Nova Family of Models" + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.09, + 0.195, + 0.107 + ], + "angle": 0, + "content": "Contents" + }, + { + "type": "title", + "bbox": [ + 0.116, + 0.118, + 0.885, + 0.132 + ], + "angle": 0, + "content": "1 Introduction 3" + }, + { + "type": "text", + "bbox": [ + 0.141, + 0.133, + 0.884, + 0.145 + ], + "angle": 0, + "content": "1.1 Amazon Nova Pro, Lite, and Micro 3" + }, + { + "type": "text", + "bbox": [ + 0.142, + 0.147, + 0.884, + 0.159 + ], + "angle": 0, + "content": "1.2 Amazon Nova Canvas and Reel 3" + }, + { + "type": "list", + "bbox": [ + 0.141, + 0.133, + 0.884, + 0.159 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.172, + 0.885, + 0.186 + ], + "angle": 0, + "content": "2 Amazon Nova Pro, Lite, and Micro Evaluations 5" + }, + { + "type": "text", + "bbox": [ + 0.139, + 0.187, + 0.884, + 0.199 + ], + "angle": 0, + "content": "2.1 Core capability public benchmarks 5" + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.201, + 0.884, + 0.213 + ], + "angle": 0, + "content": "2.1.1 Core capability text benchmarks and results 5" + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.214, + 0.884, + 0.227 + ], + "angle": 0, + "content": "2.1.2 Core capability multimodal benchmarks and results 7" + }, + { + "type": "list", + "bbox": [ + 0.176, + 0.201, + 0.884, + 0.227 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.139, + 0.228, + 0.884, + 0.241 + ], + "angle": 0, + "content": "2.2 Agentic workflows 8" + }, + { + "type": "text", + "bbox": [ + 0.177, + 0.242, + 0.884, + 0.255 + ], + "angle": 0, + "content": "2.2.1 Agentic text benchmarks and results 9" + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.256, + 0.884, + 0.269 + ], + "angle": 0, + "content": "2.2.2 Agentic multimodal benchmarks and results 9" + }, + { + "type": "list", + "bbox": [ + 0.176, + 0.242, + 0.884, + 0.269 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.14, + 0.27, + 0.884, + 0.282 + ], + "angle": 0, + "content": "2.3 Long context 10" + }, + { + "type": "text", + "bbox": [ + 0.14, + 0.284, + 0.884, + 0.297 + ], + "angle": 0, + "content": "2.4 Functional expertise 11" + }, + { + "type": "text", + "bbox": [ + 0.177, + 0.298, + 0.884, + 0.31 + ], + "angle": 0, + "content": "2.4.1 Software engineering 12" + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.311, + 0.884, + 0.324 + ], + "angle": 0, + "content": "2.4.2 Financial analysis 12" + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.325, + 0.884, + 0.338 + ], + "angle": 0, + "content": "2.4.3 Retrieval augmented generation 12" + }, + { + "type": "list", + "bbox": [ + 0.176, + 0.298, + 0.884, + 0.338 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.14, + 0.339, + 0.884, + 0.352 + ], + "angle": 0, + "content": "2.5 Runtime performance 13" + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.364, + 0.884, + 0.377 + ], + "angle": 0, + "content": "3 Amazon Nova Canvas Evaluation 15" + }, + { + "type": "text", + "bbox": [ + 0.14, + 0.379, + 0.884, + 0.391 + ], + "angle": 0, + "content": "3.1 Automated metrics 15" + }, + { + "type": "text", + "bbox": [ + 0.14, + 0.392, + 0.884, + 0.405 + ], + "angle": 0, + "content": 
"3.2 Human evaluation 15" + }, + { + "type": "list", + "bbox": [ + 0.14, + 0.379, + 0.884, + 0.405 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.418, + 0.885, + 0.431 + ], + "angle": 0, + "content": "4 Amazon Nova Reel Evaluation 16" + }, + { + "type": "text", + "bbox": [ + 0.14, + 0.433, + 0.884, + 0.445 + ], + "angle": 0, + "content": "4.1 Human evaluation metrics 16" + }, + { + "type": "text", + "bbox": [ + 0.14, + 0.446, + 0.884, + 0.459 + ], + "angle": 0, + "content": "4.2 Dataset 16" + }, + { + "type": "text", + "bbox": [ + 0.14, + 0.46, + 0.884, + 0.473 + ], + "angle": 0, + "content": "4.3 Implementation details & results 17" + }, + { + "type": "list", + "bbox": [ + 0.14, + 0.433, + 0.884, + 0.473 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.485, + 0.885, + 0.499 + ], + "angle": 0, + "content": "5 Responsible AI 17" + }, + { + "type": "text", + "bbox": [ + 0.14, + 0.5, + 0.884, + 0.514 + ], + "angle": 0, + "content": "5.1 Defining our RAI objectives 17" + }, + { + "type": "text", + "bbox": [ + 0.14, + 0.514, + 0.884, + 0.527 + ], + "angle": 0, + "content": "5.2 Ensuring adherence to RAI objectives 18" + }, + { + "type": "text", + "bbox": [ + 0.14, + 0.528, + 0.884, + 0.54 + ], + "angle": 0, + "content": "5.3 RAI Evaluation 19" + }, + { + "type": "text", + "bbox": [ + 0.14, + 0.542, + 0.884, + 0.555 + ], + "angle": 0, + "content": "5.4 Red Teaming 19" + }, + { + "type": "list", + "bbox": [ + 0.14, + 0.5, + 0.884, + 0.555 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.177, + 0.556, + 0.884, + 0.568 + ], + "angle": 0, + "content": "5.4.1 Internal Red Teaming 19" + }, + { + "type": "text", + "bbox": [ + 0.177, + 0.569, + 0.884, + 0.582 + ], + "angle": 0, + "content": "5.4.2 External Red Teaming 20" + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.583, + 0.884, + 0.596 + ], + "angle": 0, + "content": "5.4.3 Automated Red Teaming 21" + }, + { + "type": "list", + "bbox": [ + 0.176, + 0.556, + 0.884, + 0.596 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.608, + 0.885, + 0.623 + ], + "angle": 0, + "content": "6 Training Infrastructure 21" + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.634, + 0.884, + 0.649 + ], + "angle": 0, + "content": "A Amazon Nova Canvas Capabilities 28" + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.66, + 0.884, + 0.676 + ], + "angle": 0, + "content": "B Prompts and Scoring 30" + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.686, + 0.884, + 0.702 + ], + "angle": 0, + "content": "C Qualitative examples of multimodal intelligence 39" + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.713, + 0.884, + 0.728 + ], + "angle": 0, + "content": "D Correspondence and Contributors 43" + }, + { + "type": "list", + "bbox": [ + 0.115, + 0.634, + 0.884, + 0.728 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.502, + 0.948 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.371, + 0.043, + 0.621, + 0.058 + ], + "angle": 0, + "content": "The Amazon Nova Family of Models" + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.09, + 0.254, + 0.106 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.123, + 0.884, + 0.152 + ], + "angle": 0, + "content": "This document introduces Amazon Nova, a new generation of state-of-the-art foundation models that deliver frontier 
intelligence and industry-leading price performance." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.17, + 0.4, + 0.185 + ], + "angle": 0, + "content": "1.1 Amazon Nova Pro, Lite, and Micro" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.197, + 0.533, + 0.212 + ], + "angle": 0, + "content": "Key capabilities of Amazon Nova Pro, Lite, and Micro include:" + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.226, + 0.884, + 0.351 + ], + "angle": 0, + "content": "- Frontier intelligence: Amazon Nova models possess frontier intelligence, enabling them to understand and process complex language tasks with state-of-the-art accuracy. Amazon Nova Micro sets new standards in its intelligence tier in several text benchmarks such as Language Understanding (MMLU), Deep Reasoning (GPQA), Mathematics (MATH), and Multi-step Reasoning (Big-Bench Hard). Our multimodal models, Amazon Nova Pro and Lite, take text, images, documents, and video as input and generate text as output. These models set standards in several benchmarks such as Video Captioning (VATEX), Visual QA (TextVQA), Function Calling (BFCL), and multimodal agentic benchmarks (GroundUI-1K, VisualWebBench, Mind2Web) in their respective intelligence tiers. These models are the first to offer video understanding capabilities on Amazon Bedrock, enabling deeper insights from multimedia content." + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.358, + 0.881, + 0.386 + ], + "angle": 0, + "content": "- Speed: Amazon Nova has been designed for fast inference, with Amazon Micro, Lite, and Pro each being one of the fastest models in their respective intelligence tiers." + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.393, + 0.881, + 0.449 + ], + "angle": 0, + "content": "- Agentic Workflows: Amazon Nova Pro, Lite, and Micro can power AI agents capable of breaking down and executing multi-step tasks. These models are integrated with Bedrock Knowledge Bases and they excel at retrieval-augmented generation (RAG) to ensure the best accuracy by grounding their responses to the developer's data." + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.456, + 0.884, + 0.499 + ], + "angle": 0, + "content": "- Customizability: Developers can fine-tune these models with multimodal data (Pro and Lite) or text data (Pro, Lite, and Micro), providing the flexibility to achieve desired accuracy, latency, and cost. Developers can also run self-service Custom Fine-Tuning (CFT) and distillation of larger models to smaller ones via Bedrock APIs." + }, + { + "type": "text", + "bbox": [ + 0.158, + 0.505, + 0.881, + 0.533 + ], + "angle": 0, + "content": "- Price-Performance: Each model was optimized to deliver exceptional price-performance value, offering state-of-the-art performance on key benchmarks at low cost." + }, + { + "type": "list", + "bbox": [ + 0.157, + 0.226, + 0.884, + 0.533 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.548, + 0.885, + 0.687 + ], + "angle": 0, + "content": "Amazon Nova Pro, Lite, and Micro are based on the Transformer architecture [74]. Each model went through a series of training processes that began with pretraining using a mixture of large amounts of multilingual and multimodal data. Our models were trained on data from a variety of sources, including licensed data, proprietary data, open source datasets, and publicly available data where appropriate. 
We curated data from over 200 languages, with particular emphasis on Arabic, Dutch, English, French, German, Hebrew, Hindi, Italian, Japanese, Korean, Portuguese, Russian, Simplified Chinese, Spanish, and Turkish. After pretraining, models iteratively went through a series of fine-tuning stages, including Supervised Fine-Tuning (SFT) on instruction-demonstration pairs (including multimodal ones) and reward model (RM) training from human preference data [59]. Finally, the models learned from human preferences via methods like Direct Preference Optimization (DPO) [62] and Proximal Policy Optimization (PPO) [68] to ensure that the final models are aligned with human preferences in both quality and responsibility." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.705, + 0.375, + 0.719 + ], + "angle": 0, + "content": "1.2 Amazon Nova Canvas and Reel" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.732, + 0.884, + 0.761 + ], + "angle": 0, + "content": "Amazon Nova Canvas and Amazon Nova Reel are designed to create realistic multimodal content, including images and videos, for a wide range of applications such as advertising, marketing, and entertainment." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.766, + 0.781, + 0.781 + ], + "angle": 0, + "content": "Amazon Nova Canvas offers the following functionalities, with more details provided in Appendix A:" + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.794, + 0.881, + 0.848 + ], + "angle": 0, + "content": "- Text-to-image generation: Amazon Nova Canvas can generate images with various resolutions (from 512 up to 2K horizontal resolution) and aspect ratios (any aspect ratio between 1:4 and 4:1 with a maximum of 4.2M pixels). Customers can provide reference images to guide the model to generate outputs in a specific style or color palette, or to generate variations of an image." + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.856, + 0.881, + 0.911 + ], + "angle": 0, + "content": "- Image editing: Amazon Nova Canvas allows precise image editing operations like inpainting and outpainting through natural language mask prompts. These mask prompts describe the specific area of the input image that needs to be repaired. The user can also easily change a background with the background removal feature leaving the subject of the image unchanged." + }, + { + "type": "list", + "bbox": [ + 0.157, + 0.794, + 0.881, + 0.911 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.502, + 0.948 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.371, + 0.043, + 0.621, + 0.058 + ], + "angle": 0, + "content": "The Amazon Nova Family of Models" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.092, + 0.481, + 0.107 + ], + "angle": 0, + "content": "Amazon Nova Reel offers the following functionalities:" + }, + { + "type": "text", + "bbox": [ + 0.158, + 0.118, + 0.88, + 0.145 + ], + "angle": 0, + "content": "- Generate videos from a text prompt: Amazon Nova Reel can generate high-quality videos of 6-second duration (720p resolution at 24 frames per second) from a text prompt." + }, + { + "type": "text", + "bbox": [ + 0.158, + 0.15, + 0.881, + 0.177 + ], + "angle": 0, + "content": "- Generate videos from a reference image and a prompt: Amazon Nova Reel brings images to motion and generates videos that are guided by the input image and a text prompt." 
+ }, + { + "type": "text", + "bbox": [ + 0.157, + 0.183, + 0.881, + 0.238 + ], + "angle": 0, + "content": "- Camera motion control using a text prompt: With camera motion control in Amazon Nova Reel, the user can guide camera motion with text prompts like \"zoom\" and \"dolly forward\" to get the exact visual needed for each video. Amazon Nova Reel supports more than 20 camera motions. For more details, please refer to our prompting guide1." + }, + { + "type": "list", + "bbox": [ + 0.157, + 0.118, + 0.881, + 0.238 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.25, + 0.885, + 0.376 + ], + "angle": 0, + "content": "Amazon Nova Canvas and Reel are latent diffusion models [61] where a Variational AutoEncoder (VAE) [41] maps the image or video frames to latent variables on which the diffusion process happens. A text encoder tokenizes input text prompts into tokens which are then passed to the diffusion model as a conditioning signal. At inference time, a latent variable is initialized with random noise sampled from a Gaussian distribution, which is then denoised by the trained diffusion model iteratively into a clean latent variable. The clean latent variable is decoded back to images or video frames by the decoder of the VAE. Both models underwent a two-phased approach of pretraining and fine-tuning. Pretraining data were sourced from a variety of sources, including licensed data, proprietary data, open source datasets, and publicly available data where appropriate. Our highly scalable data filtering, deduplication, and enrichment pipelines were based on AWS EMR [2] and AWS Batch [1], as well as other AWS services." + }, + { + "type": "table", + "bbox": [ + 0.136, + 0.897, + 0.522, + 0.912 + ], + "angle": 0, + "content": "
| | tok/sec | MMLU accuracy | ARC-C accuracy | DROP F1-score | GPQA accuracy | MATH accuracy | GSM8K accuracy | IFEval instruction-level loose accuracy | BBH accuracy |
| Nova Pro | 100 | 85.9 | 94.8±1.3 | 85.4±0.7 | 46.9±4.6 | 76.6±1.2 | 94.8±1.2 | 92.1±1.8 | 86.9 |
| Nova Lite | 157 | 80.5 | 92.4±1.5 | 80.2±0.8 | 42.0±4.6 | 73.3±1.2 | 94.5±1.2 | 89.7±2.1 | 82.4 |
| Nova Micro | 210 | 77.6 | 90.2±1.7 | 79.3±0.8 | 40.0±4.5 | 69.3±1.3 | 92.3±1.4 | 87.2±2.3 | 79.5 |
| (eval setting) | | 0-shot CoT | 0-shot | 6-shot CoT | 0-shot CoT | 0-shot CoT | 0-shot CoT | 0-shot | 3-shot CoT |
| Claude 3.5 Sonnet (Oct) | 57 | 89.3 | 96.3M±1.1 | 88.3±0.6 | 58.0M±4.6 | 78.3±1.1 | 96.5M±1.0 | 90.2*±2.0 | 93.2 |
| Claude 3.5 Haiku | 64 | 80.3 | 90.9M±1.6 | 83.1±0.8 | 37.5M±4.5 | 69.4±1.3 | 93.8M±1.3 | 85.9*±2.4 | 86.6 |
| (eval setting) | | 0-shot CoT | 25-shot | 3-shot | 0-shot CoT | 0-shot CoT | 0-shot CoT | 0-shot | 3-shot CoT |
| Gemini 1.5 Pro (002) | 58 | 85.9 | 95.4M±1.2 | 74.9±0.9 | 55.1M±4.6 | 86.5±0.9 | 90.8±1.6 | 91.7M±1.9 | 89.2 |
| Gemini 1.5 Flash (002) | 190 | 78.9 | 94.3M±1.3 | 78.4±0.8 | 45.1M±4.6 | 77.9±1.2 | 86.2±1.9 | 91.6M±1.9 | 85.5 |
| Gemini 1.5 Flash 8B (001) | 283 | 68.1 | 88.7M±1.8 | 68.1M±0.9 | 33.5M±4.4 | 58.7±1.4 | 84.5M±2.0 | 86.1M±2.3 | 69.5 |
| (eval setting) | | 5-shot | 25-shot | 3-shot | 0-shot | 4-shot | 11-shot | 0-shot | 3-shot |
| GPT-4o | 163 | 88.7 | 96.2M±1.1 | 83.4±0.7 | 48.4M±4.6 | 76.6±1.2 | 92.6M±1.4 | 89.8M±2.1 | 83.0M |
| GPT-4o Mini | 113 | 82.0 | 92.3M±1.5 | 79.7±0.8 | 41.7M±4.6 | 70.2±1.3 | 86.4M±1.8 | 87.4M±2.3 | 81.0M |
| (eval setting) | | 0-shot | 25-shot | 3-shot | 0-shot CoT | 0-shot CoT | 0-shot CoT | 0-shot | 3-shot |
| Llama 3.2 90B | 40 | 86.0 | 94.8±1.3 | - | 46.7±4.6 | 68.0±1.3 | 95.1±1.2 | 90.9M±2.0 | - |
| Llama 3.2 11B | 124 | 73.0 | 83.4±2.1 | - | 32.8±4.3 | 51.9±1.4 | 84.5±2.0 | 85.0M±2.4 | - |
| Llama 3.1 8B | 157 | 73.0 | 83.4±2.1 | - | 30.4±4.3 | 51.9±1.4 | 84.5±2.0 | 85.0M±2.4 | - |
| (eval setting) | | 0-shot CoT | 25-shot | - | 0-shot CoT | 0-shot CoT | 8-shot CoT | - | - |
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.112, + 0.763, + 0.885, + 0.861 + ], + "angle": 0, + "content": "Table 1: Quantitative results on core capability benchmarks (MMLU [36], ARC-C [22], DROP [26], GPQA [64], MATH [37], GSM8K [23], IFEval [89] and BigBench-Hard (BBH) [72]). Unless otherwise noted, all reference numbers are taken from the original technical reports and websites for Claude models [14, 11], GPT4 models [58, 57], Llama models [45] and Gemini models [32]. Results marked with \\( M \\) were measured by \\( \\mathrm{us}^2 \\). Claude numbers for IFEval (taken from [14]) are marked with an asterisk (*), as the scoring methodology is unspecified in the report. Token generation speed in tokens per second (tok/sec), the inverse of per-token generation latency, is reproduced from Section 2.5." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.502, + 0.948 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.371, + 0.043, + 0.621, + 0.058 + ], + "angle": 0, + "content": "The Amazon Nova Family of Models" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.092, + 0.888, + 0.162 + ], + "angle": 0, + "content": "Table 1 summarizes the quantitative results of Nova models and select public models on the aforementioned benchmarks for core capabilities. When available, we reference the highest publicly-reported numbers for each benchmark from the official technical reports and websites for the Claude, Gemini, OpenAI and Llama families of models. Amazon Nova Pro, Lite, and Micro demonstrate strong performance across all benchmarks, showcasing their advanced core intelligence, particularly Amazon Nova Micro and Lite on math, reasoning, and instruction following benchmarks." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.167, + 0.885, + 0.279 + ], + "angle": 0, + "content": "We also evaluate the translation capabilities of Nova models. Flores200 [73, 34, 35], or simply Flores, is a machine translation benchmark consisting of translations from 842 distinct web articles, which tests the translation capabilities between English and non-English languages. Sentences are 21 words long on average. We use a 0-shot setup and report the macro average of two metrics, spBleu and COMET22 score [63], across a set of languages (Arabic, German, Spanish, French, Hindi, Italian, Japanese, Korean, Portuguese, Hebrew, Turkish, Simplified Chinese, Russian, Dutch) for translation from and into English. The prompts used for evaluation are summarized in Appendix B.1. Table 2 summarizes our quantitative results on Flores, demonstrating strong multilingual performance on translation for Amazon Nova Pro, Lite, and Micro." + }, + { + "type": "table", + "bbox": [ + 0.152, + 0.288, + 0.849, + 0.597 + ], + "angle": 0, + "content": "
FLORES (0-shot)
| | | en → Set1 | | Set1 → en | |
| | tok/sec | spBleu (↑) | COMET22 (↑) | spBleu (↑) | COMET22 (↑) |
| Nova Pro | 100 | 43.4 | 89.1 | 44.4 | 89.0 |
| Nova Lite | 157 | 41.5 | 88.8 | 43.1 | 88.8 |
| Nova Micro | 210 | 40.2 | 88.5 | 42.6 | 88.7 |
| Claude 3.5 Sonnet (Oct) | 57 | 42.5M | 89.4M | 43.5M | 89.1M |
| Claude 3.5 Haiku | 64 | 40.0M | 88.5M | 40.2M | 88.3M |
| Gemini 1.5 Pro (002) | 57 | 43.0M* | 89.1M* | 45.6M* | 89.1M* |
| Gemini 1.5 Flash (002) | 190 | 40.0M* | 88.5M* | 42.9M* | 88.8M* |
| Gemini 1.5 Flash 8B (001) | 283 | 38.2M* | 88.0M* | 41.4M* | 88.5M* |
| GPT-4o | 163 | 43.1M* | 89.2M* | 43.9M* | 89.0M* |
| GPT-4o Mini | 113 | 41.1M* | 88.7M* | 41.9M* | 88.7M* |
| Llama 3.2 90B | 40 | 39.7M | 88.2M | 43.7M | 88.5M |
| Llama 3.2 11B | 124 | 33.0M | 85.7M | 36.3M | 86.3M |
| Llama 3.1 8B | 157 | 32.7M | 85.5M | 36.5M | 86.5M |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.111, + 0.601, + 0.889, + 0.659 + ], + "angle": 0, + "content": "Table 2: Quantitative results on Flores200 [34], a machine translation benchmark. Set1 refers to {de, es, fr, it, pt, ja, ar, hi, ru, nl, tr, he, ko, zh}. Results marked with \\( M \\) were measured by us. Results marked with an asterisk (*) were obtained using an alternate prompt which can be found in Appendix B.1 Token generation speed in tokens per second (tok/sec), the inverse of per-token generation latency, is reproduced from Section 2.5." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.689, + 0.53, + 0.705 + ], + "angle": 0, + "content": "2.1.2 Core capability multimodal benchmarks and results" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.713, + 0.888, + 0.782 + ], + "angle": 0, + "content": "In this section we evaluate the multimodal capabilities of Amazon Nova models on a diverse set of public benchmarks. Our selection of multimodal benchmarks aims to probe for various capabilities, including natural image understanding, document understanding with charts and graphs, text understanding, and temporal reasoning in videos. For all benchmarks, we follow the suggested metrics and choice of data split for evaluation. The following list briefly describes the selected benchmarks." + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.792, + 0.884, + 0.835 + ], + "angle": 0, + "content": "- MMMU [85]: The Massive Multi-discipline Multimodal Understanding benchmark consists of college-level multiple-choice and open-ended questions from 30 different disciplines. We use Chain-of-Thought (CoT) prompting for this benchmark and report accuracy." + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.838, + 0.885, + 0.881 + ], + "angle": 0, + "content": "ChartQA [50]: The 2,500 questions of this benchmark cover three different types of charts (bar, line and pie) and require strong visual, logical, and arithmetical reasoning capabilities. We evaluate on the test set and report relaxed accuracy." + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.884, + 0.884, + 0.914 + ], + "angle": 0, + "content": "- DocVQA [51]: This benchmark probes capabilities on document analysis and recognition, including Optical Character Recognition (OCR). The 5,349 questions contain images from a diverse set of documents, ranging" + }, + { + "type": "list", + "bbox": [ + 0.157, + 0.792, + 0.885, + 0.914 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.502, + 0.948 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.371, + 0.043, + 0.621, + 0.058 + ], + "angle": 0, + "content": "The Amazon Nova Family of Models" + }, + { + "type": "table", + "bbox": [ + 0.115, + 0.088, + 0.884, + 0.372 + ], + "angle": 0, + "content": "
| | | MMMU (CoT) | ChartQA (C) | DocVQA | TextVQA | VATEX | EgoSchema |
| | tok/sec | val | test | test | val | test | test |
| | | accuracy | relaxed accuracy | ANLS | weighted accuracy | CIDEr | accuracy |
| Amazon Nova Pro | 100 | 61.7 ±3.2 | 89.2 ±1.2 | 93.5 | 81.5 | 77.8 | 72.1 ±5.4 |
| Amazon Nova Lite | 157 | 56.2 ±3.2 | 86.8 ±1.3 | 92.4 | 80.2 | 77.8 | 71.4 ±5.4 |
| Claude 3.5 Sonnet (Oct) | 57 | 70.4 ±3.0 | 90.8 ±1.1 | 94.2 | 61.7M | - | - |
| Claude 3 Haiku | 64 | 50.2 ±3.3 | 82.0 ±1.5 | 88.8 | - | - | - |
| Gemini 1.5 Pro (001) | 58 | 65.9 ±3.1E | 87.2 ±1.3 | 93.1B | 78.7 | 64.6A | 72.2 ±5.4 |
| Gemini 1.5 Flash (001) | 190 | 62.3 ±3.2E | 85.4 ±1.4 | 89.9B | 78.7 | 57.1 | 65.7 ±5.7 |
| Gemini 1.5 Flash 8B (001) | 283 | 53.7 ±3.3F | 78.2 ±1.6G | 73.6 | 66.7 | 53.2A | - |
| GPT-4o (May) | - | 69.1 ±3.0 | 85.7 ±1.4 | 92.8 | 77.2DM | - | 72.2 ±5.4 |
| GPT-4o Mini (Jul) | 113 | 59.4 ±3.2 | 79.2 ±1.6M | - | 70.3M | - | - |
| Llama 3.2 90B | 40 | 60.3 ±3.2 | 85.5 ±1.4 | 90.1 | 80.7M | - | - |
| Llama 3.2 11B | 124 | 50.7 ±3.3 | 83.4 ±1.5 | 88.4 | 71.3M | - | - |
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.111, + 0.376, + 0.884, + 0.501 + ], + "angle": 0, + "content": "Table 3: Quantitative results on four image understanding benchmarks (MMMU [85], ChartQA [50], DocVQA [51], TextVQA [70]) and 2 video understanding benchmarks (VATEX [78] and EgoSchema [49]). Higher numbers are better for all benchmarks \\((\\uparrow)\\). Unless otherwise noted, all evaluations are 0-shot and reference numbers are taken from the original technical reports and websites for Claude models [11, 12], GPT4 models [56, 55], Llama models [45, 53] and Gemini models [32, 33]. Remarks: (A) 4-shot evaluation; (B) External Optical Character Recognition (OCR) was used; (C) All models except Amazon Nova use CoT; (D) GPT-4o (Nov); (E) Gemini 1.5 Flash/Pro (002) models; (F) Reported in [33]; (G) Reported in [4]; (M) Claude 3.5 Sonnet and Llama 3.2 results for TextVQA as well as GPT4o and GPT4o mini results on ChartQA, TextVQA and VATEX were measured by us. Token generation speed in tokens per second (tok/sec), the inverse of per-token generation latency, is reproduced from Section 2.5." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.538, + 0.884, + 0.566 + ], + "angle": 0, + "content": "from 1940 to 2020 and covering multiple industries. We report Average Normalized Levenshtein Similarity (ANLS)." + }, + { + "type": "text", + "bbox": [ + 0.158, + 0.572, + 0.881, + 0.601 + ], + "angle": 0, + "content": "- TextVQA [70]: The 5,000 samples of this dataset focus specifically on text-reading capabilities (OCR) in natural images. We report weighted accuracy on the validation set." + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.607, + 0.881, + 0.647 + ], + "angle": 0, + "content": "- VATEX [78]: This video captioning benchmark covers a diverse set of human activities. We evaluate on the public test set containing videos with a length of around 10 seconds. The CIDEr [75] score is used for evaluation." + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.654, + 0.881, + 0.71 + ], + "angle": 0, + "content": "- EgoSchema [49]: The unique characteristic of this long-form video question answering benchmark is its high \"certificate length\" [15], which is, loosely speaking, the time it takes a human to verify the video description. The videos cover a broad range of natural human activities and come with human-curated multiple-choice question-answer pairs." + }, + { + "type": "list", + "bbox": [ + 0.157, + 0.572, + 0.881, + 0.71 + ], + "angle": 0, + "content": null + }, + { + "type": "table_footnote", + "bbox": [ + 0.111, + 0.722, + 0.884, + 0.78 + ], + "angle": 0, + "content": "Table 3 summarizes our quantitative results on multiple image and video understanding benchmarks. Amazon Nova Pro and Lite achieve high scores across all benchmarks. Chart understanding on ChartQA and video understanding on VATEX stand out, where Nova models rank either first or second. We provide the prompt templates for all benchmarks in Appendix B.2, as well as qualitative examples in Appendix C." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.795, + 0.284, + 0.81 + ], + "angle": 0, + "content": "2.2 Agentic workflows" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.822, + 0.884, + 0.865 + ], + "angle": 0, + "content": "Amazon Nova Pro, Lite, and Micro models can be used as agents. 
An agent considers a suite of tools and APIs, reasons about the user's request and past conversational history, chooses if a tool should be used and, if so, decides which tool to use, invokes the tool, assesses the outcome from the tool, and then communicates back with the user [83, 67, 46, 60]." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.87, + 0.884, + 0.913 + ], + "angle": 0, + "content": "To this end, we evaluated our Nova models on agentic workflows that require textual understanding and visual reasoning. For textual understanding (Section 2.2.1), we used the Berkeley Function Calling Leaderboard benchmark to test our models' capabilities in function calling and orchestrating real-world applications. For visual reasoning (Section 2.2.2)," + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.502, + 0.948 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.371, + 0.043, + 0.621, + 0.058 + ], + "angle": 0, + "content": "The Amazon Nova Family of Models" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.092, + 0.884, + 0.123 + ], + "angle": 0, + "content": "we evaluate on three benchmarks that require image understanding capabilities for correct function calling. We highlight that both Amazon Nova Pro and Lite models set a new state of the art on these challenging benchmarks." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.137, + 0.423, + 0.151 + ], + "angle": 0, + "content": "2.2.1 Agentic text benchmarks and results" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.16, + 0.885, + 0.232 + ], + "angle": 0, + "content": "Table 4 presents quantitative results on the Berkeley Function Calling Leaderboard v3 (BFCL).3 Stemming from the Gorilla project [60], the revamped BFCL [81] benchmark evaluates a model's ability to accurately call and utilize real-world functions, or tools, based on a user's natural language request. Amazon Nova models particularly excel in the Abstract Syntax Tree (AST), Execution, and Relevance metrics, as well as overall scores versus comparable models. Amazon Nova Lite and Micro also had the lowest latency of the selected models." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.236, + 0.884, + 0.293 + ], + "angle": 0, + "content": "In Table 4, AST measures the exact match function calling performance of the model when comparing function names and argument/value signatures to a human-curated ground truth. While AST allows for some soft matching based on manually-defined, permitted argument values (e.g., different date formats), Execution measures a function call's accuracy not by the call signature itself, but by comparing the return value of the call when executed against a real API." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.299, + 0.884, + 0.356 + ], + "angle": 0, + "content": "To measure the rate of hallucination, Irrelevance measures the model's ability to recognize that it does not have the appropriate functions available to help the user, and should therefore not call any. Relevance, as the opposite of irrelevance, measures the model's ability to recognize it indeed does have the functions necessary to help the user (but does not verify function signature accuracy). For both metrics, higher numbers are better." + }, + { + "type": "table", + "bbox": [ + 0.115, + 0.368, + 0.887, + 0.647 + ], + "angle": 0, + "content": "
| | Overall | Latency | Non-Live | | Live | Multi-Turn | Hallucination | |
| | accuracy (↑) | seconds (↓) | AST (↑) | execution (↑) | overall (↑) | overall (↑) | relevance (↑) | irrelevance (↑) |
| Nova Pro | 68.4 | 1.0 | 90.1 | 89.8 | 71.5 | 45.1 | 95.1 | 65.1 |
| Nova Lite | 66.6 | 0.6 | 87.5 | 86.4 | 66.0 | 50.3 | 97.6 | 49.1 |
| Nova Micro | 56.2 | 0.5 | 87.2 | 89.7 | 67.4 | 15.5 | 87.8 | 57.6 |
| Claude Sonnet 3.5 (Jun) | 61.3 | 3.9 | 70.0 | 66.3 | 74.7 | 40.0 | 68.3 | 74.6 |
| Claude Haiku 3 | 40.4 | 1.5 | 41.7 | 47.5 | 57.7 | 20.6 | 97.6 | 29.4 |
| Gemini 1.5 Pro (002) | 59.8 | 3.0 | 88.0 | 91.4 | 74.3 | 16.3 | 75.6 | 75.1 |
| Gemini 1.5 Flash (002) | 55.3 | 1.1 | 79.7 | 80.6 | 73.2 | 12.5 | 78.1 | 75.7 |
| Llama 3.2 90BA | 54.3 | N/A | 88.9 | 89.3 | 61.1 | 14.3 | 92.7 | 58.4 |
| Llama 3.2 11BA | 49.9 | N/A | 83.6 | 87.3 | 57.9 | 10.5 | 78.1 | 41.6 |
| GPT-4o (Aug) | 68.9 | 1.5 | 85.9 | 85.6 | 75.4 | 45.3 | 63.4 | 82.9 |
| GPT-4o-mini (Jul) | 60.7 | 1.6 | 84.3 | 84.1 | 70.2 | 28.3 | 80.5 | 71.8 |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.111, + 0.649, + 0.884, + 0.693 + ], + "angle": 0, + "content": "Table 4: Results on the Berkeley Function Calling Leaderboard (BFCL) v3 as of the Nov 17th, 2024 update. We include the latest versions of the models available on the leaderboard at that time. (A) We use leaderboard results for Llama 3.1 8B and 70B for Llama 3.2 11B and 90B, respectively, given the shared text LLM." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.727, + 0.478, + 0.743 + ], + "angle": 0, + "content": "2.2.2 Agentic multimodal benchmarks and results" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.751, + 0.884, + 0.809 + ], + "angle": 0, + "content": "The Amazon Nova Pro and Lite models provide native support for multimodal inputs, including agentic workflows. In this section, we present results from our models on three different benchmarks that require agents to navigate websites to solve real-world tasks. Websites are typically represented as screenshots in these datasets to correctly convey all style elements and visual data as rendered in a standard web browser." + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.819, + 0.884, + 0.876 + ], + "angle": 0, + "content": "- VisualWebBench [43]: This benchmark includes seven core tasks related to web browsing, including captioning, question answering, OCR, action prediction, and grounding. All models are evaluated on 1,536 samples that span more than 100 websites from 12 domains. The final metric is the average over different metrics for the individual core tasks." + }, + { + "type": "page_footnote", + "bbox": [ + 0.113, + 0.885, + 0.884, + 0.913 + ], + "angle": 0, + "content": "\\( {}^{3} \\) BFCL is a fast-moving, live benchmark. We report results using the state of the repository and website leaderboard as of Nov 17th, 2024 (commit 8226d)." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.502, + 0.948 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.371, + 0.043, + 0.621, + 0.058 + ], + "angle": 0, + "content": "The Amazon Nova Family of Models" + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.092, + 0.88, + 0.16 + ], + "angle": 0, + "content": "- MM-Mind2Web [86]: This extension of the original Mind2Web [24] benchmark links samples with the original website screenshots, making it multimodal. An agent needs to select an element and pick one of three elementary actions (click, type, or select) alongside a value for some actions. We report micro average over the per-sample step accuracy, where an agent is successful only if element and action selection, as well as the predicted value, are correct." + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.165, + 0.88, + 0.232 + ], + "angle": 0, + "content": "- GroundUI-1K [87]: This benchmark is composed of multiple existing datasets, including Mind2Web [24] and repurposes them as a grounding task. On 1,000 samples for evaluation, a multimodal agent is given an instruction and a screenshot of a website from a wide variety of domains and asked to predict the 2D location of the desired UI element. The agent is correct if its predicted 2D location is within the ground truth bounding box." 
+ }, + { + "type": "list", + "bbox": [ + 0.157, + 0.092, + 0.88, + 0.232 + ], + "angle": 0, + "content": null + }, + { + "type": "table_caption", + "bbox": [ + 0.111, + 0.243, + 0.885, + 0.286 + ], + "angle": 0, + "content": "Table 5 shows the results of our models on multimodal agent workflows along with other publicly-reported results. Both Amazon Nova models, Lite and Pro, demonstrate strong visual reasoning and agentic capabilities and achieve high scores on all three benchmarks." + }, + { + "type": "table", + "bbox": [ + 0.191, + 0.295, + 0.807, + 0.548 + ], + "angle": 0, + "content": "
| | VisualWebBench composite (D) | MM-Mind2Web step accuracy | GroundUI-1K accuracy |
| Nova Pro | 79.7 | 63.7 | 81.4 |
| Nova Lite | 77.7 | 60.7 | 80.2 |
| Claude 3.5 Sonnet (Oct) | 76.7M | 61.6M | 16.3 |
| GPT-4o (Nov) | 77.5M | 55.0M | 13.4C |
| GPT-4o Mini (Jul) | 71.3M | 58.6M | 7.2M |
| GPT-4 (Apr) | 64.6 | 36.8A | - |
| Gemini 1.5 Pro (002) | 76.4M | 58.4M | 35.2B |
| Gemini 1.5 Flash (002) | 76.1M | 46.2M | 59.9M |
| Gemini 1.0 Pro (001) | 48.0 | 17.9A | - |
| Llama 3.2 90B | 73.2M | 21.6M | 8.3M |
| Llama 3.2 11B | 65.1M | 22.1M | 3.7M |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.111, + 0.552, + 0.884, + 0.621 + ], + "angle": 0, + "content": "Table 5: Quantitative results on three multi-modal agentic benchmarks: VisualWebBench [43], MM-Mind2Web [86] and GroundUI-1K [87]. Reference numbers are taken from the corresponding benchmark papers [43, 86, 87] and leaderboard [3]. Remarks: (A) uses in-context learning (ICL) (please note that Amazon Nova models do not need to rely on in-context examples); (B) Gemini 1.5 Pro (001); (C) GPT-4o (May); (D) Macro average over individual metrics; (M) Measured by us." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.653, + 0.248, + 0.668 + ], + "angle": 0, + "content": "2.3 Long context" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.678, + 0.884, + 0.749 + ], + "angle": 0, + "content": "We evaluate Amazon Nova Pro, Lite, and Micro on tasks that require the models to understand and reason over long context. These skills are crucial for tasks such as long multi-turn conversations, reasoning over long lists of retrieved documents, or understanding long videos. Amazon Nova Micro, Lite, and Pro models support context lengths of 128k, 300k, and 300k tokens, respectively. We used the following benchmarks to evaluate our models' long context performance:" + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.757, + 0.88, + 0.812 + ], + "angle": 0, + "content": "- Text Needle-in-a-Haystack (NIAH): Following [40], we assessed each model's ability to locate specific information (the \"needle\") within extensive contexts (the \"haystack\"). This \"needle-in-a-haystack\" test evaluates the model's performance on context lengths starting at \\(32\\mathrm{k}\\), allowing us to measure its ability to accurately retrieve information across varying lengths of input context." + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.816, + 0.881, + 0.843 + ], + "angle": 0, + "content": "- SQuALITY [76] (ZeroScrolls Benchmark [69]): Focused on query-based summarization of literary stories, this task evaluates the model's capacity to generate relevant summaries from large contexts." + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.848, + 0.881, + 0.888 + ], + "angle": 0, + "content": "- LVBench [77]: This multimodal benchmark includes questions about YouTube videos from various domains such as TV series, sports, broadcasts, and surveillance footage. The LVBench dataset consists of 99 videos and 1,549 questions, covering six different types of tasks such as reasoning, event understanding and summarization." 
+ }, + { + "type": "list", + "bbox": [ + 0.157, + 0.757, + 0.881, + 0.888 + ], + "angle": 0, + "content": null + }, + { + "type": "page_footnote", + "bbox": [ + 0.134, + 0.897, + 0.515, + 0.912 + ], + "angle": 0, + "content": "4https://huggingface.co/datasets/AIWinter/LVBench" + }, + { + "type": "page_number", + "bbox": [ + 0.487, + 0.936, + 0.506, + 0.948 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.371, + 0.043, + 0.621, + 0.058 + ], + "angle": 0, + "content": "The Amazon Nova Family of Models" + }, + { + "type": "image", + "bbox": [ + 0.216, + 0.096, + 0.341, + 0.325 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.359, + 0.097, + 0.526, + 0.325 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.56, + 0.097, + 0.77, + 0.325 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.112, + 0.337, + 0.884, + 0.368 + ], + "angle": 0, + "content": "Figure 2: Text Needle-in-a-Haystack recall performance for Nova Micro (up-to 128k), Nova Lite (up-to 300k) and Nova Pro (up-to 300k) models." + }, + { + "type": "table", + "bbox": [ + 0.268, + 0.411, + 0.734, + 0.646 + ], + "angle": 0, + "content": "
| | SQuALITY ROUGE-L | LVBench accuracy |
| Nova Pro | 19.8 ±8.7 | 41.6 ±2.5 |
| Nova Lite | 19.2 ±8.6 | 40.4 ±2.4 |
| Nova Micro | 18.8 ±8.6 | - |
| Claude 3.5 Sonnet (Jun) | 13.4 ±7.5 | - |
| Gemini 1.5 Pro (001) | - | 33.1 ±2.3 |
| Gemini 1.5 Pro (002) | 19.1 ±8.6M | - |
| Gemini 1.5 Flash (002) | 18.1 ±8.4M | - |
| GPT-4o | 18.8 ±8.6 | 30.8 ±2.3 |
| Llama 3 - 70B | 16.4 ±8.1 | - |
| Llama 3 - 8B | 15.3 ±7.9 | - |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.112, + 0.648, + 0.884, + 0.707 + ], + "angle": 0, + "content": "Table 6: Text and Multimodal long context performance on SQuALITY (ROUGE-L) and LVBench (Accuracy). For SQuALITY, measurements for Claude 3.5 Sonnet, GPT-4o, Llama 3 70B and Llama 3 8B are taken from the Llama 3 report [45]. Gemini results were measured by \\(\\mathrm{us}^2\\) (\\(M\\)). For LVBench, Gemini and GPT-4o numbers were taken from the corresponding benchmark leaderboard [77]." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.74, + 0.884, + 0.798 + ], + "angle": 0, + "content": "Results for text and multimodal long context benchmarks are presented in Table 6. In the long video question answering task, both Amazon Nova Pro and Lite demonstrate robust performance on the LVBench dataset, surpassing other models. Amazon Nova models consistently demonstrate exceptional performance in retrieving information from any depth across both text and multimodal understanding use cases, delivering high accuracy and reliability." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.826, + 0.296, + 0.841 + ], + "angle": 0, + "content": "2.4 Functional expertise" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.856, + 0.885, + 0.914 + ], + "angle": 0, + "content": "In addition to core capabilities, foundation models must perform well in particular specialties and domains. Across our many areas of performance analyses, we have selected four domains for which to present benchmarking results: Software engineering, financial analysis, and retrieval-augmented generation. Prompt templates for all benchmarks can be found in Appendix B.3." + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.936, + 0.504, + 0.948 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.372, + 0.044, + 0.621, + 0.058 + ], + "angle": 0, + "content": "The Amazon Nova Family of Models" + }, + { + "type": "table", + "bbox": [ + 0.191, + 0.089, + 0.809, + 0.426 + ], + "angle": 0, + "content": "
| | | Software | Finance | RAG |
| | | HumanEval Python | FinQA | CRAG |
| | tok/sec | 0-shot pass@1 | 0-shot accuracy | accuracy |
| Nova Pro | 100 | 89.0 ±4.8 | 77.2 ±0.9 | 50.3 ±1.9 |
| Nova Lite | 157 | 85.4 ±5.4 | 73.6 ±0.9 | 43.8 ±1.9 |
| Nova Micro | 210 | 81.1 ±6.0 | 65.2 ±1.0 | 43.1 ±1.9 |
| Claude 3.5 Sonnet (Oct) | 57 | 93.7 ±3.7 | 77.3 ±0.9M | 52.6 ±1.8M |
| Claude 3.5 Haiku | 64 | 88.1 ±5.0 | 73.9 ±0.9M | 31.9 ±1.8M |
| Gemini 1.5 Pro (002) | 58 | 87.8 ±5.0M | 74.4 ±0.9M | 48.9 ±1.9M |
| Gemini 1.5 Flash (002) | 190 | 81.1 ±6.0M | 73.5 ±1.0M | 42.4 ±1.9M |
| Gemini 1.5 Flash 8B (001) | 283 | 81.1 ±6.0M | 63.7 ±1.0M | 37.7 ±1.8M |
| GPT-4o | 163 | 90.2 ±4.6 | 71.1 ±1.0M | 52.0 ±1.9M |
| GPT-4o Mini | 113 | 87.2 ±5.1 | 70.6 ±1.0M | 49.9 ±1.9M |
| Llama 3.2 90B | 40 | 80.5 ±6.1 | 72.8 ±1.0M | 45.2 ±1.9M |
| Llama 3.2 11B | 124 | 72.6 ±6.8 | 60.8 ±1.1M | 42.2 ±1.9M |
| Llama 3.1 8B | 157 | 72.6 ±6.8 | 61.2 ±1.0M | 42.2 ±1.8M |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.111, + 0.429, + 0.884, + 0.5 + ], + "angle": 0, + "content": "Table 7: Performance on select functional benchmarks, including software engineering benchmarks in Python with HumanEval [19], financial reasoning with FinQA [20], and retrieval augmented generation with CRAG [82]. CRAG uses our scoring method described in Section 2.4.3. Where available, reference numbers are taken from the corresponding benchmark papers and technical reports [13, 11, 32, 39, 45, 58]. Additional results were measured \\((M)\\) by \\(\\mathrm{us}^2\\). Model speed in tokens per second (Tok/Sec) is reproduced from section 2.5." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.541, + 0.317, + 0.556 + ], + "angle": 0, + "content": "2.4.1 Software engineering" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.566, + 0.885, + 0.625 + ], + "angle": 0, + "content": "We assessed Amazon Nova's code generation capabilities on the Python coding task HumanEval [19]. The benchmark contains 164 original programming problems with unit tests. These problems assess language comprehension, algorithms, and simple mathematics. Some problems are comparable to simple software interview questions. Table 7 provides the performance of our Nova models and select public models." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.644, + 0.293, + 0.659 + ], + "angle": 0, + "content": "2.4.2 Financial analysis" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.67, + 0.885, + 0.742 + ], + "angle": 0, + "content": "We use FinQA [20] to evaluate Amazon Nova's ability to understand financial data. FinQA is an expert-annotated dataset comprising 8,281 financial question-answer pairs derived from the earnings reports of S&P 500 companies. It evaluates a model's ability to extract information from both tables and unstructured text, while accurately performing calculations using relevant financial knowledge. We report the average post-rounding accuracy under the 0-shot CoT setting. Table 7 provides the performance of Amazon Nova models and select public models on FinQA." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.761, + 0.391, + 0.777 + ], + "angle": 0, + "content": "2.4.3 Retrieval augmented generation" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.787, + 0.885, + 0.913 + ], + "angle": 0, + "content": "We evaluate RAG capabilities on the CRAG [82] benchmark using the Task 1 setup, which considers five pre-selected HTML pages as external knowledge to each input question. We extract top-20 text snippets from these pages following the standard retrieval approach used in CRAG's official repository, whereby pages are first cleaned using BeautifulSoup to remove HTML tags, after which the text is then split into sentences or chunks no longer than 1000 characters. These are then encoded using the sentence-transformers/all-MiniLM-L6-v2 model, which is also used to encode the question. The top 20 chunks with highest similarity are passed as context in the input for model inference. We report the percentage of correct responses as judged by an LLM (gpt-4-turbo-2024-04-09), which compares each model's answer with the expected answer using the prompt shown in Appendix B.3.2. Table 7 provides the performance of Amazon Nova models and selected public models on a combined validation and test set of 2,706 examples." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.487, + 0.936, + 0.506, + 0.948 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.372, + 0.044, + 0.62, + 0.058 + ], + "angle": 0, + "content": "The Amazon Nova Family of Models" + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.092, + 0.309, + 0.107 + ], + "angle": 0, + "content": "2.5 Runtime performance" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.117, + 0.884, + 0.215 + ], + "angle": 0, + "content": "We evaluate the runtime performance of Amazon Nova models using three metrics: Time to First Token (TTFT), Output Tokens per Second (OTPS) and Total Response Time. TTFT is measured as the time, in seconds, it takes to receive the first token from the model after an API request is sent. OTPS is measured as the number of tokens generated per second (tok/sec). It is the rate at which a model produces subsequent output tokens after the first token, reflecting overall throughput and efficiency during inference. Total Response Time measures the total duration in seconds from the submission of the input prompt to the end of generation sequence for a given input-output prompt length. It represents the overall user experience for a model." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.221, + 0.885, + 0.292 + ], + "angle": 0, + "content": "In Figure 3, we show TTFT, OTPS, and Total Response Time using 1000 tokens of input and 100 tokens of output for Amazon Nova models and select public models as reported by Artificial Analysis5, an independent entity that benchmarks AI models and hosting providers. Amazon Nova Micro, Lite and Pro models are among the fastest models in their respective intelligence tiers. Together, all three Amazon Nova models demonstrate state-of-the-art runtime performance, ensuring a smooth and responsive user experience in many real world use cases." + }, + { + "type": "page_footnote", + "bbox": [ + 0.134, + 0.897, + 0.461, + 0.912 + ], + "angle": 0, + "content": "5https://artificialanalysis.ai/methodology" + }, + { + "type": "page_number", + "bbox": [ + 0.487, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.372, + 0.043, + 0.621, + 0.058 + ], + "angle": 0, + "content": "The Amazon Nova Family of Models" + }, + { + "type": "image", + "bbox": [ + 0.152, + 0.146, + 0.845, + 0.379 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.151, + 0.378, + 0.842, + 0.589 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.154, + 0.591, + 0.842, + 0.802 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.111, + 0.81, + 0.885, + 0.853 + ], + "angle": 0, + "content": "Figure 3: Time to First Token \\((\\downarrow)\\), Output Tokens per Second \\((\\uparrow)\\), and Total Response Time \\((\\downarrow)\\) using 1,000 tokens of input and 100 tokens of output for Amazon Nova models and select publicly-available models (Artificial Analysis, Nov 29th, 2024)." 
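+ },
+ {
+ "type": "text",
+ "angle": 0,
+ "content": "The three runtime metrics defined above can be measured against any streaming endpoint. The sketch below uses the Amazon Bedrock ConverseStream API via boto3; the region and model identifier are assumptions, and streamed text chunks serve as a rough proxy for output tokens."
+ },
+ {
+ "type": "code",
+ "angle": 0,
+ "content": "
# Minimal sketch (ours) of measuring TTFT, OTPS, and Total Response Time.
import time
import boto3

client = boto3.client("bedrock-runtime", region_name="us-east-1")  # assumed region

def measure(model_id: str, prompt: str) -> dict:
    start = time.perf_counter()
    resp = client.converse_stream(
        modelId=model_id,
        messages=[{"role": "user", "content": [{"text": prompt}]}],
    )
    ttft, chunks = None, 0
    for event in resp["stream"]:
        if "contentBlockDelta" in event:
            if ttft is None:
                ttft = time.perf_counter() - start  # Time to First Token
            chunks += 1
    total = time.perf_counter() - start  # Total Response Time
    # OTPS: rate of subsequent chunks after the first one arrived
    otps = (chunks - 1) / (total - ttft) if ttft is not None and total > ttft else 0.0
    return {"ttft_s": ttft, "otps": otps, "total_s": total}

print(measure("amazon.nova-micro-v1:0", "Write a haiku about rivers."))  # assumed model ID
"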
+ }, + { + "type": "page_number", + "bbox": [ + 0.487, + 0.936, + 0.506, + 0.948 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.371, + 0.043, + 0.621, + 0.058 + ], + "angle": 0, + "content": "The Amazon Nova Family of Models" + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.09, + 0.427, + 0.106 + ], + "angle": 0, + "content": "3 Amazon Nova Canvas Evaluation" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.121, + 0.884, + 0.178 + ], + "angle": 0, + "content": "Amazon Nova Canvas is a diffusion model that takes a text prompt and an optional RGB image as input and generates an image as an output conditioned on the input text and optional image. Illustrative examples of the images generated by Amazon Nova Canvas can be found in our Amazon Science blog post \\(^{6}\\). In this section, we provide details on the evaluation strategy and performance of the model both in terms of automated metrics and human evaluation." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.192, + 0.289, + 0.206 + ], + "angle": 0, + "content": "3.1 Automated metrics" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.218, + 0.725, + 0.233 + ], + "angle": 0, + "content": "We use ImageReward [80] and Text-to-Image Faithfulness (TIFA) [38] as automated metrics." + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.244, + 0.884, + 0.287 + ], + "angle": 0, + "content": "- ImageReward score is generated from a standardized reward model that aligns human preference with the predicted score. To compute the ImageReward score, we randomly sample 10k prompts from the MSCOCO2014 [42] validation set and use this set for calculating the score." + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.29, + 0.885, + 0.348 + ], + "angle": 0, + "content": "- Text-to-Image Faithfulness (TIFA) score is a reference-free metric that measures the faithfulness of a generated image to the input text via visual question answering (VQA). The evaluation set for the TIFA score is a pre-selected set of 4k prompts in the TIFA-v1.0 benchmark, sampled from the MSCOCO captions [42], DrawBench [66], PartiPrompts [84], and PaintSkill [21] datasets." + }, + { + "type": "list", + "bbox": [ + 0.157, + 0.244, + 0.885, + 0.348 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.357, + 0.884, + 0.387 + ], + "angle": 0, + "content": "We compare Amazon Nova Canvas with other publicly-available models including DALL.E 3 [16], Stable Diffusion 3 Medium [27], Stable Diffusion 3.5 Large [28] and Flux (Schnell and Pro) [17]. The results are shown in Table 8." + }, + { + "type": "table", + "bbox": [ + 0.31, + 0.398, + 0.688, + 0.535 + ], + "angle": 0, + "content": "
| | TIFA | ImageReward |
| Amazon Nova Canvas | 0.897 | 1.250 |
| DALL.E 3 | 0.863 | 1.052 |
| Stable Diffusion 3.5 Large | 0.891 | 1.082 |
| Stable Diffusion 3 Medium | 0.881 | 0.952 |
| Flux Pro 1.0 | 0.875 | 1.075 |
| Flux Schnell | 0.882 | 0.999 |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.165, + 0.539, + 0.831, + 0.553 + ], + "angle": 0, + "content": "Table 8: Comparison of TIFA and ImageReward metrics of Amazon Nova Canvas with other models." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.587, + 0.284, + 0.601 + ], + "angle": 0, + "content": "3.2 Human evaluation" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.612, + 0.884, + 0.697 + ], + "angle": 0, + "content": "We conduct A/B testing to compare Amazon Nova Canvas with other third-party text-to-image models. The A/B testing prompt set is composed of approximately 1,000 prompts designed to capture customer usage of text-to-image models. This set includes prompts from datasets such as MSCOCO [42], Drawbench [66], OpenParti [84], DALL.E 3 Eval [16], and DOCCI [54] and covers a broad set of categories such as humans, landscapes, natural scenarios, indoor environments, creative themes, artistic themes, and so forth. A few prompts were randomly selected and repeated in order to get additional data points on the quality of the model." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.702, + 0.885, + 0.855 + ], + "angle": 0, + "content": "With each prompt we generate an image from Amazon Nova Canvas as well as each other text-to-image model. We used random seeds to generate the images from Amazon Nova Canvas and all images were generated at \\(1\\mathrm{k}\\times 1\\mathrm{k}\\) resolution. If the prompts trigger filters such that an image is not generated, for either the Amazon Nova Canvas model or the public text-to-image model, we ignore that prompt and do not show it to the human raters. All human evaluation is done in a single-blind manner where the annotator is provided two sets of images, one from Amazon Nova Canvas and the other from the third-party model. The order of the images are randomized for each prompt and annotator. In our blind testing, we ask human annotators to select images that they prefer based on (1) text-image alignment, which measures the instruction-following capability of the model, and (2) image quality, which quantifies the overall preference of the annotators. To ensure rigorous, consistent, and unbiased evaluation, we used a third-party vendor for human evaluation. We created guidelines that were used to train the annotators so that the decision-making criteria were clear to them in each dimension." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.86, + 0.884, + 0.89 + ], + "angle": 0, + "content": "The pair-wise results comparing Amazon Nova Canvas with OpenAI DALL.E 3 and Google Imagen 3 are shown in Table 9, including win, tie, loss rate. The win rate reflects the percentage of samples where Amazon Nova Canvas was" + }, + { + "type": "page_footnote", + "bbox": [ + 0.133, + 0.898, + 0.603, + 0.912 + ], + "angle": 0, + "content": "\\(^{6}\\) https://www.amazon.science/blog/amazon-nova-canvas-examples" + }, + { + "type": "page_number", + "bbox": [ + 0.487, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.371, + 0.043, + 0.621, + 0.058 + ], + "angle": 0, + "content": "The Amazon Nova Family of Models" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.092, + 0.885, + 0.135 + ], + "angle": 0, + "content": "preferred over the other model while the tie rate indicates the scenario where the human annotator did not perceive a difference between the two models. 
As can be seen in the results, Amazon Nova Canvas has a higher win rate compared to the other text-to-image models." + }, + { + "type": "table", + "bbox": [ + 0.127, + 0.146, + 0.872, + 0.228 + ], + "angle": 0, + "content": "
| Nova Canvas versus: | DALL.E 3 | | | Imagen 3 | | |
| | win rate | tie rate | loss rate | win rate | tie rate | loss rate |
| Overall preference (image quality) | 54.5 | 6.4 | 39.1 | 48.2 | 5.3 | 46.5 |
| Instruction following (text-image alignment) | 39.4 | 22.5 | 38.1 | 38.4 | 28.1 | 33.5 |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.112, + 0.231, + 0.884, + 0.261 + ], + "angle": 0, + "content": "Table 9: The win, tie, and loss rates (%) from human evaluation of Amazon Nova Canvas versus (a) DALL.E 3 and (b) Imagen 3." + }, + { + "type": "title", + "bbox": [ + 0.112, + 0.298, + 0.405, + 0.314 + ], + "angle": 0, + "content": "4 Amazon Nova Reel Evaluation" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.329, + 0.884, + 0.385 + ], + "angle": 0, + "content": "Amazon Nova Reel is a diffusion model that takes a text prompt and an optional RGB image as input and generates a video as an output conditioned on the input text and optional image. Illustrative examples of the videos generated by the Amazon Nova Reel can be found in our Amazon Science blog post.7 In this section, we provide details on the evaluation strategy and performance of the model." + }, + { + "type": "title", + "bbox": [ + 0.112, + 0.401, + 0.341, + 0.415 + ], + "angle": 0, + "content": "4.1 Human evaluation metrics" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.426, + 0.886, + 0.497 + ], + "angle": 0, + "content": "To evaluate Amazon Nova Reel, we rely on human feedback to assess the generated videos across two primary axes: video quality and video consistency. All evaluations are conducted through single-blind pairwise comparisons. Human annotators are provided a set of two videos shown side-by-side and are asked to choose the better video or mark them as equal if they find the videos to be equally performant across the metric on which they are evaluating. All videos were generated in 720p resolution and different random seeds were used during generation." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.502, + 0.884, + 0.532 + ], + "angle": 0, + "content": "The video quality axis encapsulates the technical and perceptual aspects of the generated video via four primary components:" + }, + { + "type": "text", + "bbox": [ + 0.158, + 0.542, + 0.88, + 0.57 + ], + "angle": 0, + "content": "- Image quality: The visual appeal of individual frames, including resolution, sharpness, object clarity, and overall composition, where each frame is visually pleasing and artifact-free." + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.575, + 0.88, + 0.603 + ], + "angle": 0, + "content": "- Motion quality: The fluidity of movement across frames, including motion consistency and smooth transitions without flickering, distortion, or abrupt shifts, contributing to natural and realistic motion portrayal." + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.608, + 0.88, + 0.635 + ], + "angle": 0, + "content": "- Image-text alignment: How closely individual frames match the prompt, considering the presence of described entities, their attributes, spatial relationships, colors, and other static visual details." + }, + { + "type": "text", + "bbox": [ + 0.158, + 0.64, + 0.88, + 0.681 + ], + "angle": 0, + "content": "- Motion-text alignment: The accuracy of dynamic elements, including the correctness of actions performed by entities, camera movements, and temporal changes in attributes, as well as adherence to the provided description." 
+ }, + { + "type": "list", + "bbox": [ + 0.157, + 0.542, + 0.88, + 0.681 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.693, + 0.884, + 0.723 + ], + "angle": 0, + "content": "The video quality axis additionally includes factors influencing overall appeal, such as motion degree, entity size, creative composition, and general video likability." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.727, + 0.884, + 0.785 + ], + "angle": 0, + "content": "The video consistency axis encapsulates the temporal coherence of both subjects and backgrounds throughout the video. It includes assessments of the maintenance of entity size, shape, and appearance, as well as background stability without unexpected morphing or changes. A high score in this dimension means believable spatial relationships between foreground and background elements throughout the video duration." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.79, + 0.884, + 0.82 + ], + "angle": 0, + "content": "In combination, the video quality and video consistency metrics provide a holistic and robust evaluation framework for video generation models by considering both technical accuracy and perceptual appeal." + }, + { + "type": "title", + "bbox": [ + 0.112, + 0.834, + 0.208, + 0.848 + ], + "angle": 0, + "content": "4.2 Dataset" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.86, + 0.884, + 0.89 + ], + "angle": 0, + "content": "We curated a diverse set of prompts designed to capture various aspects of video generation. The prompts are distributed across 6 broad categories: human and activities, animals, natural scenery and landscapes, indoor scenes, objects" + }, + { + "type": "page_footnote", + "bbox": [ + 0.134, + 0.898, + 0.585, + 0.912 + ], + "angle": 0, + "content": "7https://www.amazon.science/blog/amazon-nova-reel-examples" + }, + { + "type": "page_number", + "bbox": [ + 0.487, + 0.936, + 0.506, + 0.948 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.372, + 0.044, + 0.619, + 0.058 + ], + "angle": 0, + "content": "The Amazon Nova Family of Models" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.092, + 0.885, + 0.218 + ], + "angle": 0, + "content": "interactions, and creative scenes and activities. This broad categorization ensures that the evaluation covers a wide range of real-world scenarios. We structured the prompt set to cover various motion-related aspects, which is critical for assessing motion-text alignment in the generated videos. For example, we included prompts with a variety of camera motions to evaluate how well the models follow instructions related to camera movement. Additionally, we incorporated dynamic attributes [71], in which the subject or background undergoes state or shape changes over time, which allows us to evaluate the model's ability to generate evolving entities. Finally, we added prompts that require motion binding [71], where specific compositions of movements and actions are requested, enabling us to assess how well models can generate complex, coordinated motions. The curated prompt set consists of approximately 700 prompts, all from various open source benchmarks." 
+ }, + { + "type": "title", + "bbox": [ + 0.113, + 0.233, + 0.383, + 0.248 + ], + "angle": 0, + "content": "4.3 Implementation details & results" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.259, + 0.885, + 0.412 + ], + "angle": 0, + "content": "To ensure a rigorous, consistent and unbiased evaluation process, we outsourced the annotation collection process to a third-party vendor. We created detailed guidelines, in which annotators were given comprehensive instructions and examples for each evaluation dimension, ensuring clarity on the criteria for marking preferences between videos. These guidelines included examples of different scenarios to aid in decision-making across our evaluation axes. Alongside this, we ensured that annotators were trained using expert-provided examples, with each round of annotations subject to spot checks. Specifically, \\(5 - 10\\%\\) of the data from each batch was randomly selected and reviewed by expert annotators. Based on this feedback, the vendor continuously refined the annotators' understanding and accuracy, ensuring a high standard of evaluation across the board. To further enhance the reliability of the results, we employed a consensus voting system. For each video comparison, annotations were collected from three different evaluators, and a majority voting approach was used to determine the final outcome. This method helps reduce individual biases and ensures that the final assessments are based on collective judgment, thereby increasing the robustness of the evaluation." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.417, + 0.884, + 0.502 + ], + "angle": 0, + "content": "For reporting performance, we conducted pairwise comparisons between Amazon Nova Reel and other state-of-the-art models including Gen3 Alpha [65] by Runway ML and Luma 1.6 [47] by Luma Labs. We report results in terms of win, tie, and loss rates. The win rate reflects the percentage of samples where Amazon Nova Reel was preferred over the other model, while the tie rate indicates cases where no perceptible difference between the two models was found by the evaluators. Using the curated prompt set described earlier, we evaluate the models across all the dimensions outlined above, and report the results in Table 10." + }, + { + "type": "table", + "bbox": [ + 0.212, + 0.513, + 0.789, + 0.594 + ], + "angle": 0, + "content": "
| Nova Reel versus: | Runway Gen3 Alpha | | | Luma 1.6 | | |
| | win rate | tie rate | loss rate | win rate | tie rate | loss rate |
| Video Quality | 56.4 | 9.9 | 33.7 | 51.1 | 3.4 | 45.5 |
| Video Consistency | 67.0 | 9.1 | 23.9 | 74.7 | 5.1 | 20.2 |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.111, + 0.598, + 0.884, + 0.626 + ], + "angle": 0, + "content": "Table 10: The win, tie, and loss rates \\((\\%)\\) from human evaluation of Amazon Nova Reel versus (a) Gen3-Alpha and (b) Luma1.6." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.651, + 0.884, + 0.695 + ], + "angle": 0, + "content": "In video consistency, Amazon Nova Reel achieved win rates of \\(67.0\\%\\) against Gen3 Alpha and \\(74.7\\%\\) against Luma 1.6, demonstrating superior subject and background coherence. For video quality, Amazon Nova Reel secured win rates of \\(56.4\\%\\) against Gen3 Alpha and \\(51.1\\%\\) against Luma 1.6." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.713, + 0.275, + 0.73 + ], + "angle": 0, + "content": "5 Responsible AI" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.744, + 0.885, + 0.829 + ], + "angle": 0, + "content": "Our approach to Responsible AI (RAI) is structured around eight foundational dimensions [10] shown in Table 11. These dimensions guide our approach to RAI for the Amazon Nova family of models, which we articulate in the following three sections: (1) defining our RAI design objectives, (2) our actions to ensure adherence to these objectives, and (3) system evaluation and red teaming. The last two components form a continuous loop of model development and human/automated verification to ensure that our Amazon Nova models are aligned with our RAI objectives and deliver an exceptional and delightful customer experience." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.845, + 0.35, + 0.86 + ], + "angle": 0, + "content": "5.1 Defining our RAI objectives" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.87, + 0.884, + 0.913 + ], + "angle": 0, + "content": "We operationalize our RAI dimensions into a series of detailed design objectives that guide our decision-making throughout the entire model development lifecycle, from initial data collection and pre-training to the implementation of post-deployment runtime mitigations." + }, + { + "type": "page_number", + "bbox": [ + 0.487, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.372, + 0.044, + 0.621, + 0.058 + ], + "angle": 0, + "content": "The Amazon Nova Family of Models" + }, + { + "type": "table", + "bbox": [ + 0.115, + 0.089, + 0.884, + 0.292 + ], + "angle": 0, + "content": "
| Term | Definition |
| Fairness | Considering impacts on different groups of stakeholders |
| Explainability | Understanding and evaluating system outputs |
| Privacy and security | Appropriately obtaining, using, and protecting data and models |
| Safety | Preventing harmful system output and misuse |
| Controllability | Having mechanisms to monitor and steer AI system behavior |
| Veracity and robustness | Achieving correct system outputs, even with unexpected or adversarial inputs |
| Governance | Incorporating best practices into the AI supply chain, including providers and deployers |
| Transparency | Enabling stakeholders to make informed choices about their engagement with an AI system |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.324, + 0.295, + 0.673, + 0.31 + ], + "angle": 0, + "content": "Table 11: Our eight core Responsible AI dimensions" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.35, + 0.885, + 0.435 + ], + "angle": 0, + "content": "In addition to being grounded on the RAI dimensions, our objectives are informed by relevant laws and regulations, voluntary frameworks, and our commitments to our customers, and they undergo an internal alignment process that includes reviews from a number of stakeholders. We will continue to iterate on these objections as we engage with external experts and participate in industry and government forums, including the Frontier Model Forum [29], Partnership on AI [5], and various forums organized by government agencies such as the National Institute of Standards and Technology (NIST) of the U.S. Department of Commerce [7]." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.454, + 0.884, + 0.566 + ], + "angle": 0, + "content": "Our commitment to Responsible Scaling: As the capabilities of AI models increase (through increased training data, model size or architecture innovations), so do the potential risks that they present. We joined other technology companies in signing on to the White House's voluntary commitments on the safe, secure, and transparent development and use of foundation models [6]. Since then we have actively participated in other efforts, including the AI Safety Summits in the UK and Seoul, and we have committed to new standards like the G7 AI Hiroshima Process Code of Conduct [30] in accordance with our commitment to the US White House on ensuring Safe, Secure, and Trustworthy Development and Use of Artificial Intelligence. We also started a partnership with the Model Evaluation and Threat Research (METR) center8 to enrich our Controllability design objectives." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.587, + 0.421, + 0.603 + ], + "angle": 0, + "content": "5.2 Ensuring adherence to RAI objectives" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.615, + 0.885, + 0.74 + ], + "angle": 0, + "content": "We employed a number of methods to measure and ensure compliance for each of our core RAI dimensions depending on their scope (i.e., whether they apply to model output, data management or other processes). For the dimensions that govern model behavior (Safety, Fairness, Veracity and Robustness, Controllability, and Privacy and Security), we curated the pre-training data and we used both Supervised Fine Tuning (SFT) and Reinforcement Learning from Human Feedback (RLHF) methods to align our models. Based on the objectives for each RAI dimension, we created single- and multi-turn RAI demonstrations in multiple languages and conducted helpfulness/harmfulness studies to decide on SFT data mixes. We collected human preference data to be used as inputs to RLHF training where we also provided an RAI-specific reward model. We also identify risk areas during our offline evaluation or red teaming exercises (Section 5.4) and collect semantically similar examples to be included in future SFT and RLHF rounds." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.746, + 0.884, + 0.817 + ], + "angle": 0, + "content": "In addition to the RAI model alignment, we built runtime input and output moderation models which serve as a first and last line of defense and allow us to respond more quickly to newly identified threats or gaps in model alignment. 
The main role of the input moderation model is to detect prompts that contain malicious, insecure or illegal material, or attempt to bypass the core model alignment (prompt injection, jailbreaking). Similarly, the output moderation ensures that the content adheres to our RAI objectives." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.822, + 0.884, + 0.879 + ], + "angle": 0, + "content": "We have a rigorous Governance methodology: we develop our models in a working-backwards product process that incorporates RAI at the design phase, includes design consultations and implementation assessments by dedicated RAI science and data experts, and involves routine testing, reviews with customers, best-practice development, dissemination, and training." + }, + { + "type": "page_footnote", + "bbox": [ + 0.134, + 0.897, + 0.276, + 0.912 + ], + "angle": 0, + "content": "\(^{8}\)https://metr.org/" + }, + { + "type": "page_number", + "bbox": [ + 0.488, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.371, + 0.043, + 0.621, + 0.058 + ], + "angle": 0, + "content": "The Amazon Nova Family of Models" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.092, + 0.885, + 0.175 + ], + "angle": 0, + "content": "We work to ensure that our Privacy and Security objectives are adhered to for both the model and training data. In addition to the model output alignment described above, we take measures that include data access controls [9] protecting our model training data, resulting weights, and model versions, and watermarking model outputs (see below). We address training data privacy through several layers of defense, including de-identifying or removing certain types of personal data from our training data, when feasible, as well as evaluation through red teaming exercises that cover data privacy assessments." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.181, + 0.885, + 0.254 + ], + "angle": 0, + "content": "For Explainability of our models' outputs, we conduct and leverage active research in the area of Explainable AI to deeply understand our models' current behavior and their potential future behavior, and to build capabilities to continuously correct their behavior as and when necessary. We use various explainable AI methods throughout our model development to guide our decisions regarding RAI alignment and other mitigations. Services like Clarify [8] also enable our downstream developers to easily explain model predictions." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.257, + 0.884, + 0.329 + ], + "angle": 0, + "content": "To help ensure our models' Robustness against adversarial inputs such as those that attempt to bypass alignment guardrails, we focused on risks applicable to both developers building applications using our models, and users interacting with our models via those applications. We organized those risks into broad categories such as sensitive data exfiltration, execution of unauthorized action, degradation of run-time model service availability, and malicious content generation. We used this risk organization to build model resiliency against interactions that lead to the prioritized risks." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.333, + 0.884, + 0.445 + ], + "angle": 0, + "content": "Finally, to maximize Transparency, we incorporate an invisible watermark during the image or video generation process and add C2PA\(^{9}\) metadata in all Canvas generated content.
We enhanced the watermark's robustness to alterations such as rotation, resizing, color inversion, and flipping. For videos, we embed our watermark in each frame and ensure that our watermarking and detection methods withstand H264 compression. To enable anyone to easily detect the watermarks in Amazon Nova generated content, an API will be available soon after launch. Our watermark detection system introduces several enhancements, such as replacing a single binary prediction with confidence-score-based predictions that reflect the extent to which the generated content has been edited, even when external tools were used. The new detection system covers both images and videos." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.461, + 0.266, + 0.475 + ], + "angle": 0, + "content": "5.3 RAI Evaluation" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.486, + 0.884, + 0.611 + ], + "angle": 0, + "content": "Throughout model development we perform extensive RAI evaluations using publicly available benchmarks like BOLD [25], RealToxicityPrompts [31], and MM-SafetyBench [44]. We also built a series of proprietary, dynamically updating benchmarks. To build them, our internal data annotation team created a diverse set of examples for each of our RAI dimensions. In addition, we leveraged subject-matter experts in specific areas, such as Security and Controllability, to collect adversarial prompts. We continued updating and enhancing each dataset based on evaluation and red teaming results (see Section 5.4 for more details on red teaming). This kept the internal benchmarks evergreen, avoiding overfitting during development, and also made sure the models did not regress against previously identified risks. Our datasets comprise inputs in multiple languages and multiple modalities, and contain single-turn and multi-turn conversation examples." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.628, + 0.248, + 0.643 + ], + "angle": 0, + "content": "5.4 Red Teaming" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.653, + 0.885, + 0.711 + ], + "angle": 0, + "content": "Static benchmarks give us a view of how well models perform per RAI dimension against a user's \"plain\" intent (i.e., the prompts explicitly state the intent of the user to generate prohibited content). To test our models' resilience against techniques that mask the users' intent, we rely on red teaming. We employed a multi-pronged evaluation strategy consisting of internal red teaming, red teaming with third-party and subject-matter experts, and automated red teaming." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.725, + 0.322, + 0.74 + ], + "angle": 0, + "content": "5.4.1 Internal Red Teaming" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.749, + 0.885, + 0.888 + ], + "angle": 0, + "content": "We used a team of trained data analysts and subject-matter experts to perform regular red teaming exercises to evaluate the model's robustness against adversarial prompts across all our RAI dimensions. We enhanced the diversity of manually curated adversarial prompts by employing linguistic, structural, and modality-based prompt mutation techniques, assessing each mutation for its effectiveness at generating a response that does not adhere to our RAI objectives, its likelihood of success, and its novelty to a model revision. In total, we identified and developed over 300 distinct techniques (see Figure 4), and tested techniques individually and via chaining various combinations.
The attacks covered multiple languages and modalities, targeting each language/modality individually and in combination. We designed cross-modality attacks, such as embedding adversarial content within seemingly benign visual inputs, to evaluate the models' ability to handle complex scenarios involving multiple input types. Where appropriate, we implemented automation to further improve the diversity, reliability, and efficiency of red teaming." + }, + { + "type": "page_footnote", + "bbox": [ + 0.134, + 0.897, + 0.276, + 0.913 + ], + "angle": 0, + "content": "\(^{9}\)https://c2pa.org/" + }, + { + "type": "page_number", + "bbox": [ + 0.488, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.372, + 0.044, + 0.621, + 0.058 + ], + "angle": 0, + "content": "The Amazon Nova Family of Models" + }, + { + "type": "image", + "bbox": [ + 0.158, + 0.09, + 0.845, + 0.43 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.186, + 0.441, + 0.812, + 0.457 + ], + "angle": 0, + "content": "Figure 4: Broad taxonomy and count of attack techniques we use for our red teaming exercises" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.49, + 0.884, + 0.519 + ], + "angle": 0, + "content": "After each round of red teaming, we gathered feedback from the team regarding failure patterns, which guided the next stage of the model development." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.54, + 0.325, + 0.556 + ], + "angle": 0, + "content": "5.4.2 External Red Teaming" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.567, + 0.885, + 0.761 + ], + "angle": 0, + "content": "In accordance with our commitment to the US White House on ensuring Safe, Secure, and Trustworthy Artificial Intelligence, we partner with a variety of third parties to conduct red teaming against our AI models. These initiatives are in addition to our extensive in-house efforts, which include all aspects of Cybersecurity red teaming. As with our internal red teaming efforts, we iterated during the model development based on feedback from these institutions to improve the RAI adherence of our models. We leverage red-teaming firms including ActiveFence to conduct testing in areas such as hate speech, political misinformation, extremism, and other RAI dimensions. We also work with specialized third parties to red team our models for Chemical, Biological, Radiological and Nuclear (CBRN) capabilities. Our work with Deloitte Consulting tests our AI models' capabilities in Biological risks and harms. Our work with Nemesys Insights LLC tests our AI models' capabilities in the Radiological and Nuclear domains. We also work with the Gomes Group at Carnegie Mellon University to test our models' capabilities in Chemistry and chemical compounds. Each of these partners was carefully selected based on their industry leadership, previous/parallel red teaming work with other AI model developers, and their contributions to evolving government and industry standards around CBRN and overall AI safety. We provide a brief summary of the expertise of each of these vendors and their testing methodology below." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.767, + 0.884, + 0.837 + ], + "angle": 0, + "content": "ActiveFence: ActiveFence is a team of over 150 subject matter experts providing AI Safety and Content Moderation solutions.
The team produced over 9,700 adversarial prompts, distributed over 20 categories, including content-targeted red teaming (evaluating the model's ability to generate harmful or inappropriate content), and security-targeted red teaming (assessing the model's resilience against malicious attempts to manipulate its behavior or extract sensitive information)." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.843, + 0.884, + 0.913 + ], + "angle": 0, + "content": "Deloitte: The evaluation team at Deloitte Consulting LLP (formerly known as Gryphon Scientific) has unique experience at the intersection of artificial intelligence and biology. The primary thrust of this effort involved evaluating the model against a panel of 30 questions developed to test an LLM's scientific knowledge and reasoning capabilities that could facilitate the development or use of biological weapons. The model's responses to these questions were evaluated for their scientific accuracy and utility to someone seeking to do harm with biology. After completing the initial" + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.936, + 0.506, + 0.948 + ], + "angle": 0, + "content": "20" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.371, + 0.043, + 0.621, + 0.058 + ], + "angle": 0, + "content": "The Amazon Nova Family of Models" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.092, + 0.884, + 0.121 + ], + "angle": 0, + "content": "evaluations, the Deloitte team probed more deeply into the questions to which the LLM had originally replied with potentially concerning information." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.127, + 0.884, + 0.239 + ], + "angle": 0, + "content": "Gomes Group: The Gomes Group at Carnegie Mellon University is at the forefront of integrating advanced artificial intelligence into chemical research. Their evaluation framework consisted of both automated and non-automated assessments. Two non-automated evaluations explored aggregation attack vulnerabilities through purchasing and remote chemical mixing scenarios. The automated evaluations utilized two distinct datasets: one containing 39 hazardous chemicals (including DEA Schedule I, II, and chemical warfare agents) and another with 362 common chemicals for NFPA diamond classifications. Three primary automated evaluations were conducted using the hazardous chemicals dataset. The NFPA diamond evaluation comprised 1,810 prompts, testing both single-turn and multi-turn approaches with consistent accuracy across both methods." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.243, + 0.885, + 0.356 + ], + "angle": 0, + "content": "Nemesys: Nemesys Insights LLC runs uplift studies, red teaming exercises, and risk assessments for a variety of technology companies and third-party research entities to assess national-security-related risks of large language models and other generative AI tools. For their testing, they started with human red teaming exercises focused on non-state acquisition or use of illicit radiological/nuclear (RN) materials, followed by prompt-response evaluation and uplift studies. The exercises comprised two different scenarios (a. violent non-state actor acquisition and use of Cobalt-60; b. non-state actor acquisition and international transport of HEU [highly enriched uranium]), and utilized 8 subject matter experts with operational and technological knowledge in a 2-team x 2-scenario design to construct and refine threat plans across a 6-hour planning cycle."
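+ }, + { + "type": "text", + "bbox": null, + "angle": 0, + "content": "To make the preceding red-teaming workflow concrete, the sketch below chains prompt-mutation techniques over a set of seed prompts and records whether each response adheres to policy. It is an illustrative reconstruction rather than the harness used for Nova: the two mutation functions, the call_model stub, and the is_refusal check are hypothetical placeholders to be swapped for a real model endpoint and a real moderation classifier." + }, + { + "type": "code", + "bbox": null, + "angle": 0, + "content": "# Hypothetical red-teaming harness (illustrative only; not Amazon's implementation).\nimport itertools\n\ndef leetspeak(p):  # linguistic mutation: character substitution\n    return p.replace('e', '3').replace('a', '4')\n\ndef role_play(p):  # structural mutation: wrap the request in a persona frame\n    return 'You are an actor rehearsing a scene. Stay in character: ' + p\n\nMUTATIONS = [leetspeak, role_play]\n\ndef call_model(prompt):  # stub; replace with a real model endpoint\n    return 'I cannot help with that.'\n\ndef is_refusal(response):  # stub adherence check, e.g. an output moderation model\n    return response.lower().startswith('i cannot')\n\ndef red_team(seeds, max_chain=2):\n    # Try every chain of 1..max_chain mutations on every seed prompt and\n    # record whether the (mutated) attack still produced an adherent response.\n    results = []\n    for seed in seeds:\n        for n in range(1, max_chain + 1):\n            for chain in itertools.permutations(MUTATIONS, n):\n                prompt = seed\n                for mutate in chain:\n                    prompt = mutate(prompt)\n                results.append({'seed': seed, 'chain': [m.__name__ for m in chain], 'adhered': is_refusal(call_model(prompt))})\n    return results"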
+ }, + { + "type": "title", + "bbox": [ + 0.113, + 0.369, + 0.342, + 0.384 + ], + "angle": 0, + "content": "5.4.3 Automated Red Teaming" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.392, + 0.884, + 0.545 + ], + "angle": 0, + "content": "Finally, to augment human based red teaming, we built an automated red teaming mechanism by adapting our (Feedback Loop In-context Red Teaming) FLIRT [52] framework. This approach helped us scale red teaming and repeat red teaming efficiently. FLIRT uses a list of seed prompts that have been identified by human evaluators as potentially violating one or more of our RAI dimensions. For every dimension, a subset of seeds is used to generate additional prompts with a dedicated language model, called red-LM, through in-context-learning (ICL) [18] and a carefully crafted set of instructions. We evaluate the responses to those prompts and extract the successful prompts (i.e., the ones triggering a prohibited response) for the next round of generation. The above steps are repeated for a chosen number of iterations across all RAI categories. We use our automated red teaming mechanism to evaluate both RAI adherence robustness and false refusals. We use the mechanism to generate adversarial tests across multi-turn interactions, multiple languages, and multiple input/output modalities to uncover and correct robustness issues in our models due to potential adversarial content in such interactions and inputs." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.564, + 0.347, + 0.581 + ], + "angle": 0, + "content": "6 Training Infrastructure" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.595, + 0.884, + 0.72 + ], + "angle": 0, + "content": "The Nova family of models were trained on Amazon's custom Trainium1 (TRN1) chips,\\(^{10}\\) NVidia A100 (P4d instances), and H100 (P5 instances) accelerators. Working with AWS SageMaker, we stood up NVidia GPU and TRN1 clusters and ran parallel trainings to ensure model performance parity, while optimizing training throughput on the different stacks. All clusters utilize petabit-scale non-blocking EFA network fabric which is less prone to packet loss than other network transport protocols\\(^{11}\\) and provides the highest network bandwidth with H100 accelerators compared to any other instance type available on AWS EC2\\(^{12}\\). We conducted distributed training on AWS SageMaker-managed Elastic Kubernetes Service (EKS) clusters, and utilized AWS File System X (FSx) and Simple Storage Solution (S3) for data and checkpoint IO. While FSx offers performant and convenient storage for large scale training jobs, S3 allowed cost-efficient scaling to large multimodal datasets and model checkpoints." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.726, + 0.884, + 0.825 + ], + "angle": 0, + "content": "Goodput achieved weekly average values of up to \\(97\\%\\) in pretraining runs through optimizations targeting lower job failure rate, minimizing checkpoint overhead, and overall reduction in the Mean Time to Restart (MTTR). This time is inclusive of time from the last successful checkpoint before training interruption, time taken to restart components of the system and resume training at steady state from checkpoint. Techniques such as fully distributed optimizer state and weight sharding and the elimination of all blocking overhead associated with checkpoint persistence resulted in a reduction of checkpointing overhead to \\(\\sim 1\\) sec on H100 clusters, and \\(\\sim 0.1\\) sec on TRN1 clusters. 
We exceeded our MTTR target of 9 minutes and achieved an average of 6.5 minutes on our TRN1 clusters by optimizing the" + }, + { + "type": "page_footnote", + "bbox": [ + 0.112, + 0.832, + 0.884, + 0.859 + ], + "angle": 0, + "content": "\(^{10}\)https://aws.amazon.com/blogs/aws/amazon-ec2-trn1-instances-for-high-performance-model-training-are-now-available/" + }, + { + "type": "page_footnote", + "bbox": [ + 0.114, + 0.86, + 0.882, + 0.884 + ], + "angle": 0, + "content": "\(^{11}\)https://www.amazon.science/publications/a-cloud-optimized-transport-protocol-for-elastic-and-scalable-hpc" + }, + { + "type": "page_footnote", + "bbox": [ + 0.114, + 0.885, + 0.882, + 0.911 + ], + "angle": 0, + "content": "\(^{12}\)https://aws.amazon.com/blogs/aws/new-amazon-ec2-p5-instances-powered-by-nvidia-h100-tensor-core-gpus-for-accelerating-generative-ai-and-hpc-applications/" + }, + { + "type": "list", + "bbox": [ + 0.112, + 0.832, + 0.884, + 0.911 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.936, + 0.504, + 0.948 + ], + "angle": 0, + "content": "21" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.372, + 0.044, + 0.619, + 0.058 + ], + "angle": 0, + "content": "The Amazon Nova Family of Models" + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.091, + 0.884, + 0.175 + ], + "angle": 0, + "content": "node communication initialization in the training startup process and reduced the time to load checkpoints through an asynchronous observer process. This process maps each latest checkpoint file to its corresponding node in the cluster. When resuming from the checkpoint, each node only loads the checkpoint files for its corresponding rank, reducing the time taken to discover the latest checkpoint from 3 minutes to 5 seconds. We also cache and reuse data indices to optimize training data loading initialization time. These improvements reduced data loading initialization to 205ms per restart." + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.181, + 0.884, + 0.28 + ], + "angle": 0, + "content": "To increase training efficiency, we developed a new activation checkpointing scheme called Super-Selective Activation Checkpointing (SSC). SSC minimizes activation re-computation in memory-constrained environments, reducing memory consumption by \(\sim 50\%\) while adding \(\sim 2\%\) re-computation overhead compared to NVidia's Selective Checkpointing. We also found optimization opportunities in the default gradient reduction behavior and the default PyTorch memory allocator behavior. The default gradient reduction behavior leads to suboptimal communication overlap, and the synchronous nature of the default PyTorch allocator led to stragglers in collectives, resulting in multiple stalled workers. We adjusted the gradient reduction order and frequency, allowing us to overlap the majority of data parallelism communication." + }, + { + "type": "page_number", + "bbox": [ + 0.488, + 0.936, + 0.505, + 0.947 + ], + "angle": 0, + "content": "22" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.371, + 0.043, + 0.621, + 0.058 + ], + "angle": 0, + "content": "The Amazon Nova Family of Models" + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.09, + 0.21, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.123, + 0.113, + 0.812, + 0.13 + ], + "angle": 0, + "content": "[1] Efficient Batch Computing - AWS Batch - AWS, 2024. URL https://aws.amazon.com/batch/."
+ }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.137, + 0.752, + 0.153 + ], + "angle": 0, + "content": "[2] Big Data Platform - Amazon EMR - AWS, 2024. URL https://aws.amazon.com/emr/." + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.161, + 0.885, + 0.188 + ], + "angle": 0, + "content": "[3] AgentStudio. Gemini flash. https://computer-agents.github.io/agent-studio/, 2024. Accessed: 2024-11-29." + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.198, + 0.885, + 0.281 + ], + "angle": 0, + "content": "[4] P. Agrawal, S. Antoniak, E. B. Hanna, B. Bout, D. Chaplot, J. Chudnovsky, D. Costa, B. D. Monicault, S. Garg, T. Gervet, S. Ghosh, A. Héliou, P. Jacob, A. Q. Jiang, K. Khandelwal, T. Lacroix, G. Lample, D. L. Casas, T. Lavril, T. L. Scao, A. Lo, W. Marshall, L. Martin, A. Mensch, P. Muddireddy, V. Nemychnikova, M. Pellat, P. V. Platen, N. Raghuraman, B. Rozière, A. Sablayrolles, L. Saulnier, R. Sauvestre, W. Shang, R. Soletskyi, L. Stewart, P. Stock, J. Studnia, S. Subramanian, S. Vaze, T. Wang, and S. Yang. Pixtral 12B, 2024. URL https://arxiv.org/abs/2410.07073." + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.29, + 0.881, + 0.319 + ], + "angle": 0, + "content": "[5] Amazon. Amazon joins Partnership on AI. https://www/aboutamazon.com/news/amazon-ai/amazon-joints-partnership-on-ai, 2016. Accessed: 2024-11-20." + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.328, + 0.882, + 0.356 + ], + "angle": 0, + "content": "[6] Amazon. Our commitment to the responsible use of AI. https://www/aboutamazon.com/news/company-news/amazon-responsible-ai, 2023. Accessed: 2024-11-20." + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.365, + 0.882, + 0.407 + ], + "angle": 0, + "content": "[7] Amazon. Amazon joins US Artificial Intelligence safety institute to advance responsible AI. https://www.abou tamazon.com/news/policy-news-views/amazon-joins-us-artificial-intelligence-safety-i nstitute-to-advance-responsible-ai, 2024. Accessed: 2024-11-20." + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.416, + 0.885, + 0.443 + ], + "angle": 0, + "content": "[8] Amazon. Amazon SageMaker Clarify. https://aws.amazon.com/sagemaker/clarify/, 2024. Accessed: 2024-11-20." + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.452, + 0.885, + 0.481 + ], + "angle": 0, + "content": "[9] Amazon. Data protection & privacy at AWS. https://aws.amazon.com/compliance/data-protection/, 2024. Accessed: 2024-11-20." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.489, + 0.885, + 0.517 + ], + "angle": 0, + "content": "[10] Amazon. Building AI responsibly at AWS. https://aws.amazon.com/ai/responsible-ai/, 2024. Accessed: 2024-11-20." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.526, + 0.885, + 0.569 + ], + "angle": 0, + "content": "[11] Anthropic. The Claude 3 model family: Opus, Sonnet, Haiku. Technical report, Anthropic, 2023. URL https://www-cdn.anthropic.com/de8ba9b01c9ab7cbabf5c33b80b7bbc618857627/Model_Card_Claude_3.pdf." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.577, + 0.872, + 0.593 + ], + "angle": 0, + "content": "[12] Anthropic. Claude Sonnet. https://www.anthropic.com/claude/sonnet, 2024. Accessed: 2024-11-20." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.601, + 0.681, + 0.617 + ], + "angle": 0, + "content": "[13] Anthropic AI. Claude 3.5 Sonnet model card addendum. Technical report, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.624, + 0.882, + 0.653 + ], + "angle": 0, + "content": "[14] Anthropic AI Team. 
Claude 3.5 Haiku and upgraded Claude 3.5 Sonnet, 2024. URL https://assets.anthropic.com/m/1cd9d098ac3e6467/original/Claude-3-Model-Card-October-Addendum.pdf." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.661, + 0.856, + 0.677 + ], + "angle": 0, + "content": "[15] S. Arora and B. Barak. Computational complexity: a modern approach. Cambridge University Press, 2009." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.684, + 0.882, + 0.714 + ], + "angle": 0, + "content": "[16] J. Betker, G. Goh, L. Jing, T. Brooks, J. Wang, L. Li, L. Ouyang, J. Zhuang, J. Lee, Y. Guo, et al. Improving image generation with better captions. Computer Science. https://cdn.openai.com/papers/dall-e-3.pdf, 2(3):8, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.722, + 0.808, + 0.737 + ], + "angle": 0, + "content": "[17] Black Forest Labs. Flux models. 2024. URL https://github.com/black-forest-labs/flux." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.745, + 0.885, + 0.773 + ], + "angle": 0, + "content": "[18] T. B. Brown, B. Mann, N. Ryder, M. Subbiah, J. Kaplan, P. Dhariwal, A. Neelakantan, P. Shyam, G. Sastry, A. Askell, et al. Language models are few-shot learners. arXiv preprint arXiv:2005.14165, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.782, + 0.885, + 0.811 + ], + "angle": 0, + "content": "[19] M. Chen, J. Tworek, H. Jun, Q. Yuan, H. P. D. O. Pinto, J. Kaplan, H. Edwards, Y. Burda, N. Joseph, G. Brockman, et al. Evaluating large language models trained on code, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.818, + 0.885, + 0.861 + ], + "angle": 0, + "content": "[20] Z. Chen, W. Chen, C. Smiley, S. Shah, I. Borova, D. Langdon, R. N. Moussa, M. I. Beane, T.-H. K. Huang, B. R. Routledge, and W. Y. Wang. FinQA: A dataset of numerical reasoning over financial data. ArXiv, abs/2109.00122, 2021. URL https://api.semanticscholar.org/CorpusID:235399966." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.87, + 0.885, + 0.911 + ], + "angle": 0, + "content": "[21] J. Cho, A. Zala, and M. Bansal. DALL-eval: Probing the reasoning skills and social biases of text-to-image generation models. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 3043-3054, 2023." + }, + { + "type": "list", + "bbox": [ + 0.114, + 0.113, + 0.885, + 0.911 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "23" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.371, + 0.043, + 0.621, + 0.058 + ], + "angle": 0, + "content": "The Amazon Nova Family of Models" + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.091, + 0.885, + 0.122 + ], + "angle": 0, + "content": "[22] P. Clark, I. Cowhey, O. Etzioni, T. Khot, A. Sabharwal, C. Schoenick, and O. Tafjord. Think you have solved question answering? try ARC, the AI2 reasoning challenge. arXiv:1803.05457v1, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.129, + 0.885, + 0.172 + ], + "angle": 0, + "content": "[23] K. Cobbe, V. Kosaraju, M. Bavarian, M. Chen, H. Jun, L. Kaiser, M. Plappert, J. Tworek, J. Hilton, R. Nakano, C. Hesse, and J. Schulman. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.181, + 0.885, + 0.213 + ], + "angle": 0, + "content": "[24] X. Deng, Y. Gu, B. Zheng, S. Chen, S. Stevens, B. Wang, H. Sun, and Y. Su. Mind2Web: Towards a generalist agent for the web.
In NeurIPS, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.22, + 0.885, + 0.29 + ], + "angle": 0, + "content": "[25] J. Dhamala, T. Sun, V. Kumar, S. Krishna, Y. Pruksachatkun, K.-W. Chang, and R. Gupta. BOLD: Dataset and metrics for measuring biases in open-ended language generation. In Proceedings of the 2021 ACM Conference on Fairness, Accountability, and Transparency, FAccT '21, pages 862-872, New York, NY, USA, 2021. Association for Computing Machinery. ISBN 9781450383097. doi: 10.1145/3442188.3445924. URL https://doi.org/10.1145/3442188.3445924." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.299, + 0.885, + 0.331 + ], + "angle": 0, + "content": "[26] D. Dua, Y. Wang, P. Dasigi, G. Stanovsky, S. Singh, and M. Gardner. DROP: A reading comprehension benchmark requiring discrete reasoning over paragraphs. In Proc. of NAACL, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.338, + 0.885, + 0.383 + ], + "angle": 0, + "content": "[27] P. Esser, S. Kulal, A. Blattmann, R. Entezari, J. Müller, H. Saini, Y. Levi, D. Lorenz, A. Sauer, F. Boesel, et al. Scaling rectified flow transformers for high-resolution image synthesis. In Forty-first International Conference on Machine Learning, 2024. URL https://huggingface.co/stabilityai/stable-diffusion-3-medium." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.39, + 0.885, + 0.422 + ], + "angle": 0, + "content": "[28] P. Esser, S. Kulal, A. Blattmann, R. Entezari, J. Müller, H. Saini, Y. Levi, D. Lorenz, A. Sauer, F. Boesel, et al. Stable Diffusion 3.5. 2024. URL https://stability.ai/news/introducing-stable-diffusion-3-5." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.429, + 0.885, + 0.472 + ], + "angle": 0, + "content": "[29] Frontier Model Forum. Amazon and Meta join the Frontier Model Forum to promote AI safety. https://www.frontiermodelforum.org/updates/amazon-and-meta-join-the-frontier-model-forum-to-promote-ai-safety/, 2024. Accessed: 2024-11-20." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.481, + 0.885, + 0.513 + ], + "angle": 0, + "content": "[30] G7 Hiroshima Summit. Hiroshima process international code of conduct for organizations developing advanced AI systems. https://www.mofa.go.jp/files/100573473.pdf, 2023. Accessed: 2024-11-20." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.52, + 0.885, + 0.591 + ], + "angle": 0, + "content": "[31] S. Gehman, S. Gururangan, M. Sap, Y. Choi, and N. A. Smith. RealToxicityPrompts: Evaluating neural toxic degeneration in language models. In T. Cohn, Y. He, and Y. Liu, editors, Findings of the Association for Computational Linguistics: EMNLP 2020, pages 3356-3369, Online, Nov. 2020. Association for Computational Linguistics. doi: 10.18653/v1/2020.findings-emnlp.301. URL https://aclanthology.org/2020.findings-emnlp.301." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.6, + 0.885, + 0.63 + ], + "angle": 0, + "content": "[32] Gemini Team. Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context, 2024. URL https://arxiv.org/abs/2403.05530." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.638, + 0.885, + 0.668 + ], + "angle": 0, + "content": "[33] Google Deepmind. Gemini Flash. https://deepmind.google/technologies/gemini/flash/, 2024. Accessed: 2024-11-20." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.676, + 0.885, + 0.707 + ], + "angle": 0, + "content": "[34] N. Goyal, C. Gao, V. Chaudhary, P.-J. Chen, G. Wenzek, D. Ju, S. Krishnan, M. Ranzato, F. Guzmán, and A. Fan.
The FLORES-101 evaluation benchmark for low-resource and multilingual machine translation. 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.715, + 0.885, + 0.746 + ], + "angle": 0, + "content": "[35] F. Guzmán, P.-J. Chen, M. Ott, J. Pino, G. Lample, P. Koehn, V. Chaudhary, and M. Ranzato. Two new evaluation datasets for low-resource machine translation: Nepali-english and sinhala-english. 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.753, + 0.885, + 0.798 + ], + "angle": 0, + "content": "[36] D. Hendrycks, C. Burns, S. Basart, A. Zou, M. Mazeika, D. Song, and J. Steinhardt. Measuring massive multitask language understanding. In International Conference on Learning Representations, 2021. URL https://openreview.net/forum?id=d7KBjmI3GmQ." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.806, + 0.885, + 0.837 + ], + "angle": 0, + "content": "[37] D. Hendrycks, C. Burns, S. Kadavath, A. Arora, S. Basart, E. Tang, D. Song, and J. Steinhardt. Measuring mathematical problem solving with the MATH dataset. NeurIPS, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.844, + 0.885, + 0.888 + ], + "angle": 0, + "content": "[38] Y. Hu, B. Liu, J. Kasai, Y. Wang, M. Ostendorf, R. Krishna, and N. A. Smith. TIFA: Accurate and interpretable text-to-image faithfulness evaluation with question answering. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 20406-20417, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.896, + 0.885, + 0.914 + ], + "angle": 0, + "content": "[39] R. Islam and O. M. Moushi. GPT-4o: The cutting-edge advancement in multimodal LLM. Technical report, 2024." + }, + { + "type": "list", + "bbox": [ + 0.113, + 0.091, + 0.885, + 0.914 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "24" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.371, + 0.043, + 0.621, + 0.058 + ], + "angle": 0, + "content": "The Amazon Nova Family of Models" + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.091, + 0.885, + 0.121 + ], + "angle": 0, + "content": "[40] G. Kamradt. LLMTest_NeedleInAHaystack, 2023. URL https://github.com/gkamradt/LLMTest_NeedleInAHaystack/blob/main/README.md." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.13, + 0.885, + 0.159 + ], + "angle": 0, + "content": "[41] D. P. Kingma. Auto-encoding variational Bayes. 2nd International Conference on Learning Representations, ICLR, 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.168, + 0.885, + 0.213 + ], + "angle": 0, + "content": "[42] T.-Y. Lin, M. Maire, S. Belongie, J. Hays, P. Perona, D. Ramanan, P. Dollár, and C. L. Zitnick. Microsoft COCO: Common objects in context. In Computer Vision-ECCV 2014: 13th European Conference, Zurich, Switzerland, September 6-12, 2014, Proceedings, Part V 13, pages 740-755. Springer, 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.221, + 0.885, + 0.251 + ], + "angle": 0, + "content": "[43] J. Liu, Y. Song, B. Y. Lin, W. Lam, G. Neubig, Y. Li, and X. Yue. VisualWebBench: How far have multimodal llms evolved in web page understanding and grounding?, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.26, + 0.885, + 0.316 + ], + "angle": 0, + "content": "[44] X. Liu, Y. Zhu, J. Gu, Y. Lan, C. Yang, and Y. Qiao. MM-SafetyBench: A benchmark for safety evaluation of multimodal large language models. In A. Leonardis, E. Ricci, S. Roth, O. Russakovsky, T. Sattler, and G.
Varol, editors, Computer Vision – ECCV 2024, pages 386–403, Cham, 2025. Springer Nature Switzerland. ISBN 978-3-031-72992-8." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.326, + 0.885, + 0.355 + ], + "angle": 0, + "content": "[45] Llama Team, AI Meta. The Llama 3 herd of models, 2024. URL https://github.com/meta-llama/llama-models/blob/main/models/llama3_1/MODEL_CARD.md." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.365, + 0.885, + 0.409 + ], + "angle": 0, + "content": "[46] P. Lu, B. Peng, H. Cheng, M. Galley, K.-W. Chang, Y. N. Wu, S.-C. Zhu, and J. Gao. Chameleon: Plug-and-play compositional reasoning with large language models. In The 37th Conference on Neural Information Processing Systems (NeurIPS), 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.418, + 0.604, + 0.435 + ], + "angle": 0, + "content": "[47] Luma Labs, 2024. URL https://lumalabs.ai/dream-machine." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.443, + 0.885, + 0.472 + ], + "angle": 0, + "content": "[48] L. Madaan, A. K. Singh, R. Schaeffer, A. Poulton, S. Koyejo, P. Stenetorp, S. Narang, and D. Hupkes. Quantifying variance in evaluation benchmarks, 2024. URL https://arxiv.org/abs/2406.10229." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.481, + 0.885, + 0.511 + ], + "angle": 0, + "content": "[49] K. Mangalam, R. Akshulakov, and J. Malik. EgoSchema: A diagnostic benchmark for very long-form video language understanding. In NeurIPS, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.52, + 0.885, + 0.55 + ], + "angle": 0, + "content": "[50] A. Masry, D. X. Long, J. Q. Tan, S. Joty, and E. Hoque. ChartQA: A benchmark for question answering about charts with visual and logical reasoning. In ACL Findings, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.559, + 0.88, + 0.575 + ], + "angle": 0, + "content": "[51] M. Mathew, D. Karatzas, and C. Jawahar. DocVQA: A dataset for VQA on document images. In WACV, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.584, + 0.885, + 0.628 + ], + "angle": 0, + "content": "[52] N. Mehrabi, P. Goyal, C. Dupuy, Q. Hu, S. Ghosh, R. Zemel, K.-W. Chang, A. Galstyan, and R. Gupta. FLIRT: Feedback loop in-context red teaming. In EMNLP 2024, 2024. URL https://www.amazon.science/publications/flirt-feedback-loop-in-context-red-teaming." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.636, + 0.885, + 0.679 + ], + "angle": 0, + "content": "[53] Meta. Llama 3.2 Github model card vision. https://github.com/meta-llama/llama-models/blob/main/models/llama3_2/MODEL_CARD_VISION.md#instruction-tuned-models, 2024. Accessed: 2024-11-20." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.689, + 0.885, + 0.719 + ], + "angle": 0, + "content": "[54] Y. Onoe, S. Rane, Z. Berger, Y. Bitton, J. Cho, R. Garg, A. Ku, Z. Parekh, J. Pont-Tuset, G. Tanzer, et al. DOCCI: Descriptions of connected and contrasting images. URL https://arxiv.org/abs/2404.19753." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.728, + 0.885, + 0.758 + ], + "angle": 0, + "content": "[55] OpenAI. GPT 4o mini. https://openai.com/index/gpt-4o-mini-advancing-cost-efficient-intelligence, 2024. Accessed: 2024-11-20." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.767, + 0.841, + 0.784 + ], + "angle": 0, + "content": "[56] OpenAI. Hello GPT 4o. https://openai.com/index/hello-gpt-4o, 2024. Accessed: 2024-11-20." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.792, + 0.799, + 0.808 + ], + "angle": 0, + "content": "[57] OpenAI Team. simple evals GPT4, 2024. URL https://github.com/openai/simple-evals." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.817, + 0.885, + 0.847 + ], + "angle": 0, + "content": "[58] OpenAI Team. o1 mini system card, 2024. URL https://cdn.openai.com/o1-system-card-20240917.pdf." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.855, + 0.885, + 0.913 + ], + "angle": 0, + "content": "[59] L. Ouyang, J. Wu, X. Jiang, D. Almeida, C. Wainwright, P. Mishkin, C. Zhang, S. Agarwal, K. Slama, A. Ray, J. Schulman, J. Hilton, F. Kelton, L. Miller, M. Simens, A. Askell, P. Welinder, P. F. Christiano, J. Leike, and R. Lowe. Training language models to follow instructions with human feedback. In Advances in Neural Information Processing Systems, volume 35, pages 27730-27744, 2022." + }, + { + "type": "list", + "bbox": [ + 0.114, + 0.091, + 0.885, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "25" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.371, + 0.043, + 0.621, + 0.058 + ], + "angle": 0, + "content": "The Amazon Nova Family of Models" + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.091, + 0.885, + 0.121 + ], + "angle": 0, + "content": "[60] S. G. Patil, T. Zhang, X. Wang, and J. E. Gonzalez. Gorilla: Large language model connected with massive APIs, 2023. URL https://arxiv.org/abs/2305.15334." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.127, + 0.705, + 0.144 + ], + "angle": 0, + "content": "[61] W. Peebles and S. Xie. Scalable diffusion models with transformers. In ICCV, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.15, + 0.885, + 0.194 + ], + "angle": 0, + "content": "[62] R. Rafailov, A. Sharma, E. Mitchell, C. D. Manning, S. Ermon, and C. Finn. Direct preference optimization: Your language model is secretly a reward model. In Thirty-seventh Conference on Neural Information Processing Systems, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.201, + 0.885, + 0.313 + ], + "angle": 0, + "content": "[63] R. Rei, J. G. C. de Souza, D. Alves, C. Zerva, A. C. Farinha, T. Glushkova, A. Lavie, L. Coheur, and A. F. T. Martins. COMET-22: Unbabel-IST 2022 submission for the metrics shared task. In P. Koehn, L. Barrault, O. Bojar, F. Bougares, R. Chatterjee, M. R. Costa-jussa, C. Federmann, M. Fishel, A. Fraser, M. Freitag, Y. Graham, R. Grundkiewicz, P. Guzman, B. Haddow, M. Huck, A. Jimeno Yepes, T. Kocmi, A. Martins, M. Morishita, C. Monz, M. Nagata, T. Nakazawa, M. Negri, A. Néveol, M. Neves, M. Popel, M. Turchi, and M. Zampieri, editors, Proceedings of the Seventh Conference on Machine Translation (WMT), pages 578–585, Abu Dhabi, United Arab Emirates (Hybrid), Dec. 2022. Association for Computational Linguistics. URL https://aclanthology.org/2022.wmt-1.52." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.32, + 0.885, + 0.351 + ], + "angle": 0, + "content": "[64] D. Rein, B. L. Hou, A. C. Stickland, J. Petty, R. Y. Pang, J. Dirani, J. Michael, and S. R. Bowman. GPQA: A graduate-level google-proof Q&A benchmark, 2023. URL https://arxiv.org/abs/2311.12022." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.357, + 0.821, + 0.374 + ], + "angle": 0, + "content": "[65] Runway Research, 2024. URL https://runwayml.com/research/introducing-gen-3-alpha." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.38, + 0.885, + 0.423 + ], + "angle": 0, + "content": "[66] C. Saharia, W. Chan, S. Saxena, L. Li, J. Whang, E. L. Denton, K. Ghasemipour, R. Gontijo Lopes, B. Karagol Ayan, T. Salimans, et al. Photorealistic text-to-image diffusion models with deep language understanding. Advances in neural information processing systems, 35:36479-36494, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.43, + 0.885, + 0.473 + ], + "angle": 0, + "content": "[67] T. Schick, J. Dwivedi-Yu, R. Dessi, R. Raileanu, M. Lomeli, E. Hambro, L. Zettlemoyer, N. Cancedda, and T. Scialom. Toolformer: Language models can teach themselves to use tools. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. URL https://openreview.net/forum?id=Yacmpz84TH." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.48, + 0.885, + 0.497 + ], + "angle": 0, + "content": "[68] J. Schulman, F. Wolski, P. Dhariwal, A. Radford, and O. Klimov. Proximal policy optimization algorithms, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.503, + 0.885, + 0.572 + ], + "angle": 0, + "content": "[69] U. Shaham, M. Ivgi, A. Efrat, J. Berant, and O. Levy. ZeroSCROLLS: A zero-shot benchmark for long text understanding. In H. Bouamor, J. Pino, and K. Bali, editors, Findings of the Association for Computational Linguistics: EMNLP 2023, pages 7977-7989, Singapore, Dec. 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023-findings-emnlp.536. URL https://aclanthology.org/2023-findings-emnlp.536." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.581, + 0.885, + 0.61 + ], + "angle": 0, + "content": "[70] A. Singh, V. Natarajan, M. Shah, Y. Jiang, X. Chen, D. Batra, D. Parikh, and M. Rohrbach. Towards VQA models that can read. In CVPR, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.617, + 0.885, + 0.647 + ], + "angle": 0, + "content": "[71] K. Sun, K. Huang, X. Liu, Y. Wu, Z. Xu, Z. Li, and X. Liu. T2V-CompBench: A comprehensive benchmark for compositional text-to-video generation. arXiv preprint arXiv:2407.14505, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.654, + 0.885, + 0.697 + ], + "angle": 0, + "content": "[72] M. Suzgun, N. Scales, N. Scharli, S. Gehrmann, Y. Tay, H. W. Chung, A. Chowdhery, Q. V. Le, E. H. Chi, D. Zhou, , and J. Wei. Challenging BIG-Bench tasks and whether chain-of-thought can solve them. arXiv preprint arXiv:2210.09261, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.704, + 0.885, + 0.775 + ], + "angle": 0, + "content": "[73] N. Team, M. R. Costa-jussa, J. Cross, O. Celebi, M. Elbayad, K. Heafield, K. Heffernan, E. Kalbassi, J. Lam, D. Licht, J. Maillard, A. Sun, S. Wang, G. Wenzek, A. Youngblood, B. Akula, L. Barrault, G. M. Gonzalez, P. Hansanti, J. Hoffman, S. Jarrett, K. R. Sadagopan, D. Rowe, S. Spruit, C. Tran, P. Andrews, N. F. Ayan, S. Bhosale, S. Edunov, A. Fan, C. Gao, V. Goswami, F. Guzmán, P. Koehn, A. Mourachko, C. Ropers, S. Saleem, H. Schwenk, and J. Wang. No language left behind: Scaling human-centered machine translation. 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.782, + 0.885, + 0.812 + ], + "angle": 0, + "content": "[74] A. Vaswani, N. Shazeer, N. Parmar, J. Uszkoreit, L. Jones, A. N. Gomez, L. Kaiser, and I. Polosukhin. Attention is all you need, 2023. URL https://arxiv.org/abs/1706.03762." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.819, + 0.885, + 0.848 + ], + "angle": 0, + "content": "[75] R. 
Vedantam, C. L. Zitnick, and D. Parikh. CIDEr: Consensus-based Image Description Evaluation. In CVPR, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.856, + 0.885, + 0.913 + ], + "angle": 0, + "content": "[76] A. Wang, R. Y. Pang, A. Chen, J. Phang, and S. R. Bowman. SQuALITY: Building a long-document summarization dataset the hard way. In Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pages 1139–1156, Abu Dhabi, United Arab Emirates, Dec. 2022. Association for Computational Linguistics. URL https://aclanthology.org/2022.emnlp-main.75." + }, + { + "type": "list", + "bbox": [ + 0.114, + 0.091, + 0.885, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "26" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.371, + 0.043, + 0.621, + 0.058 + ], + "angle": 0, + "content": "The Amazon Nova Family of Models" + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.091, + 0.884, + 0.121 + ], + "angle": 0, + "content": "[77] W. Wang, Z. He, W. Hong, Y. Cheng, X. Zhang, J. Qi, X. Gu, S. Huang, B. Xu, Y. Dong, et al. LVBench: An extreme long video understanding benchmark. arXiv preprint arXiv:2406.08035, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.128, + 0.885, + 0.159 + ], + "angle": 0, + "content": "[78] X. Wang, J. Wu, J. Chen, L. Li, Y.-F. Wang, and W. Y. Wang. VATEX: A large-scale, high-quality multilingual dataset for video-and-language research. In ICCV, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.166, + 0.885, + 0.223 + ], + "angle": 0, + "content": "[79] J. Wei, X. Wang, D. Schuurmans, M. Bosma, B. Ichter, F. Xia, E. H. Chi, Q. V. Le, and D. Zhou. Chain-of-thought prompting elicits reasoning in large language models. In Proceedings of the 36th International Conference on Neural Information Processing Systems, NIPS '22, Red Hook, NY, USA, 2024. Curran Associates Inc. ISBN 9781713871088." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.231, + 0.885, + 0.262 + ], + "angle": 0, + "content": "[80] J. Xu, X. Liu, Y. Wu, Y. Tong, Q. Li, M. Ding, J. Tang, and Y. Dong. ImageReward: Learning and evaluating human preferences for text-to-image generation. Advances in Neural Information Processing Systems, 36, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.269, + 0.885, + 0.298 + ], + "angle": 0, + "content": "[81] F. Yan, H. Mao, C. C.-J. Ji, T. Zhang, S. G. Patil, I. Stoica, and J. E. Gonzalez. Berkeley function calling leaderboard. 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.306, + 0.885, + 0.364 + ], + "angle": 0, + "content": "[82] X. Yang, K. Sun, H. Xin, Y. Sun, N. Bhalla, X. Chen, S. Choudhary, R. D. Gui, Z. W. Jiang, Z. Jiang, L. Kong, B. Moran, J. Wang, Y. E. Xu, A. Yan, C. Yang, E. Yuan, H. Zha, N. Tang, L. Chen, N. Scheffer, Y. Liu, N. Shah, R. Wanga, A. Kumar, W.-t. Yih, and X. L. Dong. CRAG – comprehensive RAG benchmark. arXiv preprint arXiv:2406.04744, 2024. URL https://arxiv.org/abs/2406.04744." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.371, + 0.885, + 0.402 + ], + "angle": 0, + "content": "[83] S. Yao, J. Zhao, D. Yu, N. Du, I. Shafran, K. Narasimhan, and Y. Cao. ReAct: Synergizing reasoning and acting in language models. In International Conference on Learning Representations (ICLR), 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.409, + 0.885, + 0.44 + ], + "angle": 0, + "content": "[84] J. Yu, Y. Xu, J. Y. Koh, T.
Luong, G. Baid, Z. Wang, V. Vasudevan, A. Ku, Y. Yang, B. K. Ayan, et al. Scaling autoregressive models for content-rich text-to-image generation. arXiv preprint arXiv:2206.10789, 2(3):5, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.447, + 0.885, + 0.49 + ], + "angle": 0, + "content": "[85] X. Yue, Y. Ni, K. Zhang, T. Zheng, R. Liu, G. Zhang, S. Stevens, D. Jiang, W. Ren, Y. Sun, C. Wei, B. Yu, R. Yuan, R. Sun, M. Yin, B. Zheng, Z. Yang, Y. Liu, W. Huang, H. Sun, Y. Su, and W. Chen. MMMU: A massive multi-discipline multimodal understanding and reasoning benchmark for expert agi. In CVPR, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.497, + 0.885, + 0.515 + ], + "angle": 0, + "content": "[86] B. Zheng, B. Gou, J. Kil, H. Sun, and Y. Su. GPT-4V(ision) is a generalist web agent, if grounded. In ICML, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.522, + 0.885, + 0.553 + ], + "angle": 0, + "content": "[87] L. Zheng, Z. Huang, Z. Xue, X. Wang, B. An, and S. Yan. AgentStudio: A toolkit for building general virtual agents. arXiv preprint arXiv:2403.17918, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.56, + 0.885, + 0.604 + ], + "angle": 0, + "content": "[88] M. Zhong, A. Zhang, X. Wang, R. Hou, W. Xiong, C. Zhu, Z. Chen, L. Tan, C. Bi, M. Lewis, S. Popuri, S. Narang, M. Kambadur, D. Mahajan, S. Edunov, J. Han, and L. van der Maaten. Law of the weakest link: Cross capabilities of large language models. arXiv preprint arXiv:2409.19951, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.611, + 0.885, + 0.641 + ], + "angle": 0, + "content": "[89] J. Zhou, T. Lu, S. Mishra, S. Brahma, S. Basu, Y. Luan, D. Zhou, and L. Hou. Instruction-following evaluation for large language models, 2023. URL https://arxiv.org/abs/2311.07911." + }, + { + "type": "list", + "bbox": [ + 0.114, + 0.091, + 0.885, + 0.641 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.935, + 0.505, + 0.948 + ], + "angle": 0, + "content": "27" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.371, + 0.043, + 0.621, + 0.058 + ], + "angle": 0, + "content": "The Amazon Nova Family of Models" + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.09, + 0.442, + 0.108 + ], + "angle": 0, + "content": "A Amazon Nova Canvas Capabilities" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.121, + 0.731, + 0.136 + ], + "angle": 0, + "content": "Our Nova Canvas model offers the following functionalities, with examples given in Figure 5." + }, + { + "type": "text", + "bbox": [ + 0.158, + 0.146, + 0.88, + 0.174 + ], + "angle": 0, + "content": "- Text-to-image generation allows customers to create images at various resolutions (from \(512 \times 512\) up to \(2\mathrm{K} \times 2\mathrm{K}\))." + }, + { + "type": "text", + "bbox": [ + 0.158, + 0.179, + 0.88, + 0.22 + ], + "angle": 0, + "content": "- Editing allows developers to edit images using a text prompt, a mask image, or both. Amazon Nova Canvas supports text-to-image editing and image-to-image editing, including inpainting, outpainting, and object removal." + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.226, + 0.881, + 0.253 + ], + "angle": 0, + "content": "- Image variation allows customers to generate images that keep the content of user-provided images while introducing variations."
+ }, + { + "type": "text", + "bbox": [ + 0.157, + 0.259, + 0.881, + 0.286 + ], + "angle": 0, + "content": "- Image conditioning provide a reference image along with a text prompt, resulting in outputs that follow the layout and structure of the user-supplied reference." + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.291, + 0.881, + 0.319 + ], + "angle": 0, + "content": "- Image guidance with color palette allows customers to precisely control the color palette of generated images by providing a list of hex codes along with the text prompt." + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.324, + 0.814, + 0.338 + ], + "angle": 0, + "content": "- Background removal automatically removes background from images containing multiple objects." + }, + { + "type": "list", + "bbox": [ + 0.157, + 0.146, + 0.881, + 0.338 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.487, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "28" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.372, + 0.044, + 0.621, + 0.058 + ], + "angle": 0, + "content": "The Amazon Nova Family of Models" + }, + { + "type": "image_caption", + "bbox": [ + 0.167, + 0.213, + 0.294, + 0.237 + ], + "angle": 0, + "content": "A dinosaur sitting in a tea cup" + }, + { + "type": "image", + "bbox": [ + 0.31, + 0.158, + 0.468, + 0.281 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.205, + 0.286, + 0.446, + 0.3 + ], + "angle": 0, + "content": "(a) Image generation from a text prompt" + }, + { + "type": "image", + "bbox": [ + 0.5, + 0.21, + 0.657, + 0.28 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.66, + 0.21, + 0.816, + 0.28 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.566, + 0.285, + 0.78, + 0.3 + ], + "angle": 0, + "content": "(b) Inpainting the image with swans" + }, + { + "type": "image", + "bbox": [ + 0.152, + 0.314, + 0.309, + 0.437 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.159, + 0.442, + 0.302, + 0.451 + ], + "angle": 0, + "content": "change flowers to orange color" + }, + { + "type": "image", + "bbox": [ + 0.312, + 0.327, + 0.468, + 0.449 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.273, + 0.466, + 0.377, + 0.48 + ], + "angle": 0, + "content": "(c) Image editing" + }, + { + "type": "image", + "bbox": [ + 0.5, + 0.338, + 0.657, + 0.46 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.571, + 0.466, + 0.776, + 0.48 + ], + "angle": 0, + "content": "(d) Outpainting a new background" + }, + { + "type": "image", + "bbox": [ + 0.66, + 0.338, + 0.816, + 0.46 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.152, + 0.495, + 0.309, + 0.617 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.159, + 0.623, + 0.302, + 0.633 + ], + "angle": 0, + "content": "a hamster eats apple slice" + }, + { + "type": "image", + "bbox": [ + 0.311, + 0.508, + 0.468, + 0.629 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.274, + 0.647, + 0.376, + 0.66 + ], + "angle": 0, + "content": "(e) Style transfer" + }, + { + "type": "image", + "bbox": [ + 0.5, + 0.495, + 0.657, + 0.617 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.507, + 0.622, + 0.65, + 0.632 + ], + "angle": 0, + "content": "A wooden boat in summer" + }, + { + 
"type": "image", + "bbox": [ + 0.66, + 0.508, + 0.816, + 0.63 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.608, + 0.647, + 0.738, + 0.661 + ], + "angle": 0, + "content": "(f) Guided generation" + }, + { + "type": "image", + "bbox": [ + 0.157, + 0.713, + 0.303, + 0.742 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.151, + 0.744, + 0.308, + 0.778 + ], + "angle": 0, + "content": "A jar of salad dressing in a rustic kitchen surrounded by fresh vegetables with studio lighting" + }, + { + "type": "image", + "bbox": [ + 0.311, + 0.677, + 0.468, + 0.798 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.229, + 0.803, + 0.42, + 0.817 + ], + "angle": 0, + "content": "(g) Controlling the color palette" + }, + { + "type": "image", + "bbox": [ + 0.5, + 0.676, + 0.657, + 0.798 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.596, + 0.803, + 0.749, + 0.817 + ], + "angle": 0, + "content": "(h) Background Removal" + }, + { + "type": "image", + "bbox": [ + 0.667, + 0.711, + 0.802, + 0.781 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.173, + 0.824, + 0.822, + 0.84 + ], + "angle": 0, + "content": "Figure 5: Example capabilities of Amazon Nova Canvas, our content generation model for images." + }, + { + "type": "page_number", + "bbox": [ + 0.487, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "29" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.371, + 0.043, + 0.621, + 0.058 + ], + "angle": 0, + "content": "The Amazon Nova Family of Models" + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.09, + 0.33, + 0.109 + ], + "angle": 0, + "content": "B Prompts and Scoring" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.121, + 0.886, + 0.15 + ], + "angle": 0, + "content": "Prompt templates used for Amazon Nova evaluations are given below, along with those used for select other public models where noted. Additional materials and evaluation results from this report can be found at:" + }, + { + "type": "text", + "bbox": [ + 0.353, + 0.162, + 0.643, + 0.177 + ], + "angle": 0, + "content": "https://huggingface.co.amazon-agi" + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.192, + 0.265, + 0.206 + ], + "angle": 0, + "content": "B.1 Text evaluation" + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.218, + 0.348, + 0.234 + ], + "angle": 0, + "content": "B.1.1 Language Understanding" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.241, + 0.202, + 0.255 + ], + "angle": 0, + "content": "For MMLU:" + }, + { + "type": "code", + "bbox": [ + 0.113, + 0.264, + 0.88, + 0.321 + ], + "angle": 0, + "content": "What is the correct answer to this question: \nChoices: . Let's think step by step: \nBased on the above, what is the single, most likely answer choice? Answer in the format \"The correct answer is (insert answer here).\"" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.337, + 0.2, + 0.351 + ], + "angle": 0, + "content": "For ARC-C:" + }, + { + "type": "code", + "bbox": [ + 0.113, + 0.36, + 0.873, + 0.432 + ], + "angle": 0, + "content": "Given the following question and four candidate answers (A, B, C and D), choose the best answer. \nQuestion: \nYour response should end with \"The best answer is [the_answer_letter]\" where the [the_answer_letter] is one of A, B, C or D." 
+ }, + { + "type": "text", + "bbox": [ + 0.114, + 0.448, + 0.192, + 0.461 + ], + "angle": 0, + "content": "For DROP:" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.462, + 0.31, + 0.476 + ], + "angle": 0, + "content": "We use the following 6 shots:" + }, + { + "type": "code", + "bbox": [ + 0.129, + 0.485, + 0.801, + 0.913 + ], + "angle": 0, + "content": "- answer: >- According to the passage, the European Coal and Steel Community was established in 1951 and became the EEC in 1958. 1958 - 1951 = 7. So the answer is 7 \npassage: >- Since the 1970s, U.S. governments have negotiated managed-trade agreements, such as the North American Free Trade Agreement in the 1990s, the Dominican Republic-Central America Free Trade Agreement in 2006, and a number of bilateral agreements. In Europe, six countries formed the European Coal and Steel Community in 1951 which became the European Economic Community in 1958. Two core objectives of the EEC were the development of a common market, subsequently renamed the single market, and establishing a customs union between its member states. \nquestion: How many years did the European Coal and Steel Community exist? \n- answer: >- According to the passage, 23.5% ages 18 to 24. 23.5% \npassage: >- In the county, the population was spread out with 23.50% 18, 8.70% 13.30% \nquestion: >- How many more percent are under the age of 18 compared to the 18 to 24 group? \n- answer: >- According to the passage, Stafford threw 5 TD passes, 3 of which were to Johnson. 5 - 3 = 2. So the answer is 2 \npassage: >- Playing in their second straight Thanksgiving game, the Eagles struggled especially on defense, where they were unable to stop the much-hyped Lions offense. The worst of it all was how unproven rookie Eric Rowe was tasked" + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.936, + 0.506, + 0.948 + ], + "angle": 0, + "content": "30" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.372, + 0.044, + 0.621, + 0.058 + ], + "angle": 0, + "content": "The Amazon Nova Family of Models" + }, + { + "type": "code", + "bbox": [ + 0.111, + 0.088, + 0.889, + 0.761 + ], + "angle": 0, + "content": "with covering wide receiver Calvin Johnson, leading to Johnson catching 3 \ntouchdowns. Stafford's five passing touchdowns, including three of them to \nJohnson was too much for the Eagles to overcome and for the second \nconsecutive time this season, the Eagles gave up 45 points in a game. With \nthe loss, the Eagles drop to 4-7 on the season and 6-1 when playing on \nThanksgiving. \nquestion: How many TD passes did Stafford throw other than to Johnson? \n- answer: >- All the touchdown runs are: a 27-yard touchdown run, a 9-yard touchdown run, a 11-yard touchdown run. The smallest number among 27, 9, 11 is 9. So the shortest touchdown run was 9 yards. All the touchdown passes are: a 12-yard touchdown pass. So the longest touchdown pass was 12 yards. So the shortest touchdown run and the longest touchdown pass combine for 9 + 12 = 21 yards. So the answer is 21 \npassage: >- The Seahawks played the San Francisco 49ers. In the first quarter, the Hawks RB Julius Jones got a 27-yard TD run, along with DT Craig Terrill returning a fumble 9 yards for a touchdown. In the third quarter, the 49ers almost rallied as RB H. J. Torres made a 12-yard TD pass to Lucas Nelly, along with Mare kicking a 32-yard field goal. In the final quarter, Julius Jones got another 11-yard TD. \nquestion: >- How many yards do the shortest touchdown run and the longest touchdown pass combine for? \n- answer: >- The Ravens kicker Billy Cundiff got a 45-yard field goal in the second quarter, concluding the first half with a 10-7 lead. So the Ravens had 10 points at halftime. So the answer is 10 \npassage: >- The Steelers went home for a duel with the Baltimore Ravens. Pittsburgh would deliver the opening punch in the first quarter with a 1-yard touchdown from running back Rashard Mendenhall. The Ravens would make it even as running back Willis McGahee got a 9-yard TD. The Ravens kicker Billy Cundiff got a 45-yard field goal in the second quarter, concluding the first half with a 10-7 lead. The Steelers brought the game into overtime with a 38-yard field goal by Andrew Foster. The Ravens Billy Cundiff pulled off a winning 33-yard field goal in overtime. \nquestion: How many points did the Ravens have at halftime? \n- answer: >- The first and third quarters were the scoreless quarters. So there are 2 scoreless quarters. So the answer is 2 \npassage: >- The Vikings flew to Bank of America Stadium to face the Carolina Panthers. After a scoreless first quarter, Carolina got on the board with quarterback Matt Moore finding fullback Brad Hoover on a 1-yard TD pass. After yet another scoreless quarter, Carolina sealed the game as Matt Moore completed a 42-yard touchdown pass to wide receiver Steve Smith. \nquestion: How many scoreless quarters were there?" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.772, + 0.452, + 0.788 + ], + "angle": 0, + "content": "For each shot we provide the following instruction:" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.795, + 0.872, + 0.825 + ], + "angle": 0, + "content": "Conclude your answer with: \"So the answer is {final answer}\". Make sure the final answer is in plain text format" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.843, + 0.405, + 0.858 + ], + "angle": 0, + "content": "And we create each user prompt as follows:" + }, + { + "type": "code", + "bbox": [ + 0.114, + 0.866, + 0.231, + 0.907 + ], + "angle": 0, + "content": " " + }, + { + "type": "page_number", + "bbox": [ + 0.487, + 0.936, + 0.504, + 0.948 + ], + "angle": 0, + "content": "31" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.371, + 0.043, + 0.621, + 0.058 + ], + "angle": 0, + "content": "The Amazon Nova Family of Models" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.092, + 0.193, + 0.105 + ], + "angle": 0, + "content": "For IFEval:" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.106, + 0.556, + 0.121 + ], + "angle": 0, + "content": "No particular prompt was added (the query was input to the model)." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.126, + 0.183, + 0.139 + ], + "angle": 0, + "content": "For BBH:" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.14, + 0.478, + 0.155 + ], + "angle": 0, + "content": "We use a preamble that describes the task, for example:" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.162, + 0.551, + 0.177 + ], + "angle": 0, + "content": "Evaluate the result of a random Boolean expression." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.192, + 0.509, + 0.209 + ], + "angle": 0, + "content": "We then provide few-shot examples in the following format:" + }, + { + "type": "code", + "bbox": [ + 0.114, + 0.214, + 0.613, + 0.285 + ], + "angle": 0, + "content": "<preamble> \nQuestion: \n \nLet's think step by step. \n.
So the answer is " + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.3, + 0.331, + 0.316 + ], + "angle": 0, + "content": "And we follow this by the query:" + }, + { + "type": "code", + "bbox": [ + 0.114, + 0.322, + 0.329, + 0.378 + ], + "angle": 0, + "content": "< preamble> \nQuestion: \n \nLet's think step by step." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.394, + 0.583, + 0.409 + ], + "angle": 0, + "content": "For each subject, We provide the subject-specific instructions as below:" + }, + { + "type": "code", + "bbox": [ + 0.114, + 0.416, + 0.861, + 0.913 + ], + "angle": 0, + "content": "- subject: booleanExpressions\n instruction: Conclude your answer with: \"So the answer is True or False.\"\n- subject: causal_judgement\n instruction: Conclude your answer with: \"So the answer is Yes or No.\"\n- subject: date_understanding\n instruction: Conclude your answer with: \"So the answer is (answer_letter).\" . Where answer_letter is A, or B, or ...\n- subject: disambiguation_qa\n instruction: Conclude your answer with: \"So the answer is (answer_letter).\" . Where answer_letter is A, or B, or ...\n- subject: dycklanguages\n instruction: Correctly close a Dyck-n word. Conclude your answer with: \"So the answer is {final answer}.\". Make sure the final answer is in plain text format\n- subject: formal_fallacies\n instruction: Conclude your answer with: \"So the answer is valid or invalid.\"\n- subject: geometric_shapes\n instruction: Conclude your answer with: \"So the answer is (answer_letter).\" . Where answer_letter is A, or B, or ...\n- subject: hyperbaton\n instruction: Conclude your answer with: \"\\So the answer is (answer_letter).\" . Where answer_letter is A, or B, or ...\n- subject: logical_deductionfive Objects\n instruction: Conclude your answer with: \"So the answer is (answer_letter).\" . Where answer_letter is A, or B, or ...\n- subject: logical_deduction-seven Objects\n instruction: Conclude your answer with: \"So the answer is (answer_letter).\" . Where answer_letter is A, or B, or ...\n- subject: logical_deduction_three Objects\n instruction: Conclude your answer with: \"So the answer is (answer_letter).\" . Where answer_letter is A, or B, or ...\n- subject: movie Recommendation\n instruction: Conclude your answer with: \"So the answer is (answer_letter).\" . Where answer_letter is A, or B, or ...\n- subject: multistep_arithmetic_two\n instruction: Conclude your answer with: \"So the answer is {final answer}.\". Make sure the final answer is in plain text format" + }, + { + "type": "page_number", + "bbox": [ + 0.487, + 0.936, + 0.506, + 0.948 + ], + "angle": 0, + "content": "32" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.371, + 0.043, + 0.621, + 0.058 + ], + "angle": 0, + "content": "The Amazon Nova Family of Models" + }, + { + "type": "code", + "bbox": [ + 0.114, + 0.093, + 0.885, + 0.621 + ], + "angle": 0, + "content": "- subject: navigate\n instruction: Conclude your answer with: \"So the answer is Yes or No\".\n- subject: object_counting\n instruction: Conclude your answer with: \"So the answer is .\". Where is an integer\n- subject: penguins_in_a_table\n instruction: Conclude your answer with: \"So the answer is (answer_letter)\". Where answer_letter is A, or B, or ...\n- subject: reasoning_about_colored Objects\n instruction: Conclude your answer with: \"So the answer is (answer_letter)\". Where answer_letter is A, or B, or ...\n- subject: ruin_names\n instruction: Conclude your answer with: \"So the answer is (answer_letter)\". 
Where answer_letter is A, or B, or ...\n- subject: salient Translation_error_detector\n instruction: Conclude your answer with: \"So the answer is (answer_letter)\". Where answer_letter is A, or B, or ...\n- subject: snarks\n instruction: Conclude your answer with: \"So the answer is (answer_letter)\". Where answer_letter is A, or B, or ...\n- subject: sports-understanding\n instruction: Conclude your answer with: \"So the answer is yes or no\".\n- subject: temporal_sequences\n instruction: Conclude your answer with: \"So the answer is (answer_letter)\". Where answer_letter is A, or B, or ...\n- subject: tracking_shuffledobjectsFive Objects\n instruction: Conclude your answer with: \"So the answer is (answer_letter)\". Where answer_letter is A, or B, or ...\n- subject: tracking_shuffledobjects-seven Objects\n instruction: Conclude your answer with: \"So the answer is (answer_letter)\". Where answer_letter is A, or B, or ...\n- subject: tracking_shuffledobjects_three Objects\n instruction: \"Conclude your answer with: \"So the answer is (answer_letter)\". Where answer_letter is A, or B, or ...\n- subject: web_of Lies\n instruction: Conclude your answer with: \"So the answer is Yes or No\".\n- subject: wordsorting\n instruction: Conclude your answer with: \"So the answer is word_1 word_2 ... word_n\".\"" + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.64, + 0.194, + 0.654 + ], + "angle": 0, + "content": "For GPQA:" + }, + { + "type": "code", + "bbox": [ + 0.111, + 0.669, + 0.882, + 0.727 + ], + "angle": 0, + "content": "What is the correct answer to this question: \nChoices: . Let's think step by step: \nBased on the above, what is the single, most likely answer choice? Answer in the format \"The correct answer is (insert answer here).\"" + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.764, + 0.345, + 0.78 + ], + "angle": 0, + "content": "B.1.2 Mathematical Reasoning" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.79, + 0.259, + 0.805 + ], + "angle": 0, + "content": "For MATH, GSM8K:" + }, + { + "type": "code", + "bbox": [ + 0.114, + 0.82, + 0.51, + 0.891 + ], + "angle": 0, + "content": "Solve the following math problem step by step. Remember to put your answer inside \\boxed{}" + }, + { + "type": "page_number", + "bbox": [ + 0.487, + 0.936, + 0.506, + 0.948 + ], + "angle": 0, + "content": "33" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.371, + 0.043, + 0.621, + 0.058 + ], + "angle": 0, + "content": "The Amazon Nova Family of Models" + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.092, + 0.251, + 0.106 + ], + "angle": 0, + "content": "B.1.3 Translation" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.116, + 0.191, + 0.128 + ], + "angle": 0, + "content": "For Flores:" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.13, + 0.236, + 0.143 + ], + "angle": 0, + "content": "Nova and LLama:" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.152, + 0.88, + 0.182 + ], + "angle": 0, + "content": "Translate the following text into {tgt-lang}. Please output only the translated text with no prefix or introduction: {src}" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.199, + 0.234, + 0.213 + ], + "angle": 0, + "content": "Gemini and GPT:" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.222, + 0.862, + 0.252 + ], + "angle": 0, + "content": "Your job is to translate a sentence from {src-lang} into {tgt-lang}. 
Please output ONLY the translation and nothing else: {src}" + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.278, + 0.266, + 0.294 + ], + "angle": 0, + "content": "B.1.4 Long Context" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.302, + 0.884, + 0.332 + ], + "angle": 0, + "content": "For SQuALITY (ZeroScrolls Benchmark), we use the standard prompt template for Amazon Nova and Gemini models as in [69]:" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.339, + 0.741, + 0.357 + ], + "angle": 0, + "content": "You are given a story and a question. Answer the question in a paragraph." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.367, + 0.171, + 0.38 + ], + "angle": 0, + "content": "Story:" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.382, + 0.179, + 0.396 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.409, + 0.193, + 0.422 + ], + "angle": 0, + "content": "Question:" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.424, + 0.203, + 0.437 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.45, + 0.177, + 0.463 + ], + "angle": 0, + "content": "Answer:" + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.493, + 0.316, + 0.507 + ], + "angle": 0, + "content": "B.2 Multimodal evaluation" + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.518, + 0.229, + 0.532 + ], + "angle": 0, + "content": "B.2.1 MMMU" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.542, + 0.318, + 0.557 + ], + "angle": 0, + "content": "For multiple-choice questions:" + }, + { + "type": "code", + "bbox": [ + 0.113, + 0.565, + 0.852, + 0.802 + ], + "angle": 0, + "content": "With the image, the following question, and the four possible answers (A, B, C and D), select the correct answer. (A) (B) ... (X) - For clear-cut questions: Give the answer directly with minimal elaboration. - For complex questions: Adopt this step-by-step method: ## Step 1: [Concise description] [Brief explanation] ## Step 2: [Concise description] [Brief explanation] In every scenario, conclude with: The best answer is [the_answer_letter]. where [ the_answer_letter] is one of A, B, C or D. Let's proceed with a systematic approach" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.819, + 0.292, + 0.835 + ], + "angle": 0, + "content": "For open-ended questions:" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.843, + 0.697, + 0.858 + ], + "angle": 0, + "content": "With the image and the following question, provide a correct answer." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.858, + 0.203, + 0.871 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.884, + 0.773, + 0.899 + ], + "angle": 0, + "content": "- For clear-cut questions: Give the answer directly with minimal elaboration." 
+ }, + { + "type": "text", + "bbox": [ + 0.113, + 0.899, + 0.595, + 0.913 + ], + "angle": 0, + "content": "- For complex questions: Adopt this step-by-step method:" + }, + { + "type": "list", + "bbox": [ + 0.113, + 0.884, + 0.773, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.936, + 0.506, + 0.948 + ], + "angle": 0, + "content": "34" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.371, + 0.043, + 0.621, + 0.058 + ], + "angle": 0, + "content": "The Amazon Nova Family of Models" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.092, + 0.391, + 0.106 + ], + "angle": 0, + "content": "Step 1: [Concise description]" + }, + { + "type": "text", + "bbox": [ + 0.118, + 0.107, + 0.277, + 0.12 + ], + "angle": 0, + "content": "[Brief explanation]" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.121, + 0.388, + 0.134 + ], + "angle": 0, + "content": "Step 2: [Concise description]" + }, + { + "type": "text", + "bbox": [ + 0.118, + 0.135, + 0.277, + 0.148 + ], + "angle": 0, + "content": "[Brief explanation]" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.16, + 0.852, + 0.204 + ], + "angle": 0, + "content": "In every scenario, conclude with: The best answer is [the_answer_phrase]. where [the_answer_phrase] is a concise and direct answer to the question. Let's proceed with a systematic approach." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.228, + 0.411, + 0.243 + ], + "angle": 0, + "content": "B.2.2 ChartQA, DocVQA, and TextVQA" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.255, + 0.203, + 0.267 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.268, + 0.542, + 0.282 + ], + "angle": 0, + "content": "Answer the question using a single word or phrase." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.307, + 0.223, + 0.32 + ], + "angle": 0, + "content": "B.2.3 VATEX" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.332, + 0.867, + 0.375 + ], + "angle": 0, + "content": "Render a clear and concise one-sentence summary of the video. The summary should be at least 10 words but no more than 20 words. Analyze the video first before summarizing it. Do not hallucinate objects." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.399, + 0.251, + 0.414 + ], + "angle": 0, + "content": "B.2.4 EgoSchema" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.424, + 0.88, + 0.466 + ], + "angle": 0, + "content": "You will be given a question about a video and three possible answer options. You will be provided frames from the video, sampled evenly across the video " + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.468, + 0.236, + 0.479 + ], + "angle": 0, + "content": "(A) " + }, + { + "type": "text", + "bbox": [ + 0.118, + 0.481, + 0.236, + 0.492 + ], + "angle": 0, + "content": "(B) " + }, + { + "type": "text", + "bbox": [ + 0.118, + 0.495, + 0.237, + 0.506 + ], + "angle": 0, + "content": "(C)" + }, + { + "type": "list", + "bbox": [ + 0.117, + 0.468, + 0.237, + 0.506 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.522, + 0.645, + 0.536 + ], + "angle": 0, + "content": "Answer with the option letter from the given choices directly."
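+ }, + { + "type": "text", + "content": "A minimal sketch of the 'sampled evenly across the video' frame selection mentioned in the EgoSchema prompt above; the function name and frame budget are assumptions, as the report does not specify its sampling code:" + }, + { + "type": "code", + "content": "import numpy as np \n\n# Illustrative sketch: choose n frame indices spread uniformly from the first \n# frame to the last, e.g. when building an EgoSchema-style prompt. \ndef evenly_sampled_indices(total_frames: int, n: int) -> list: \n    if total_frames <= n: \n        return list(range(total_frames)) \n    return np.linspace(0, total_frames - 1, n).round().astype(int).tolist() \n\n# Example: a 900-frame clip reduced to 8 frames. \n# evenly_sampled_indices(900, 8) -> [0, 128, 257, 385, 514, 642, 771, 899]"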
+ }, + { + "type": "title", + "bbox": [ + 0.114, + 0.56, + 0.289, + 0.574 + ], + "angle": 0, + "content": "B.2.5 VisualWebBench" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.585, + 0.304, + 0.599 + ], + "angle": 0, + "content": "For the web captioning task:" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.607, + 0.869, + 0.647 + ], + "angle": 0, + "content": "\"You are given a screenshot of a webpage. Please generate the meta web description information of this webpage, i.e., content attribute in HTML element." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.662, + 0.833, + 0.69 + ], + "angle": 0, + "content": "You should use this format, and do not output any explanation or any other contents: " + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.707, + 0.292, + 0.722 + ], + "angle": 0, + "content": "For the heading OCR task:" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.73, + 0.81, + 0.759 + ], + "angle": 0, + "content": "You are given a screenshot of a webpage. Please generate the main text within the screenshot, which can be regarded as the heading of the webpage." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.771, + 0.878, + 0.8 + ], + "angle": 0, + "content": "You should directly tell me the first sentence of the main content, and do not output any explanation or any other contents." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.816, + 0.258, + 0.831 + ], + "angle": 0, + "content": "For the web QA task:" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.84, + 0.203, + 0.852 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.853, + 0.86, + 0.881 + ], + "angle": 0, + "content": "You should directly tell me your answer in the fewest words possible, and do not output any explanation or any other contents." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.898, + 0.292, + 0.911 + ], + "angle": 0, + "content": "For the element OCR task:" + }, + { + "type": "page_number", + "bbox": [ + 0.487, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "35" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.371, + 0.043, + 0.621, + 0.058 + ], + "angle": 0, + "content": "The Amazon Nova Family of Models" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.101, + 0.861, + 0.131 + ], + "angle": 0, + "content": "You are given a screenshot of a webpage with a red rectangle bounding box. The [x1, y1, x2, y2] coordinates of the bounding box is ." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.143, + 0.837, + 0.172 + ], + "angle": 0, + "content": "Please perform OCR in the bounding box and recognize the text content within the red bounding box." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.19, + 0.315, + 0.205 + ], + "angle": 0, + "content": "For the action prediction task:" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.214, + 0.861, + 0.242 + ], + "angle": 0, + "content": "You are given a screenshot of a webpage with a red rectangle bounding box. The [x1, y1, x2, y2] coordinates of the bounding box is ." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.243, + 0.854, + 0.283 + ], + "angle": 0, + "content": "Please select the best webpage description that matches the new webpage after clicking the selected element in the bounding box: " + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.297, + 0.861, + 0.327 + ], + "angle": 0, + "content": "You should directly tell me your choice in a single uppercase letter, and do not output any explanation or any other contents." 
+ }, + { + "type": "text", + "bbox": [ + 0.114, + 0.345, + 0.327, + 0.36 + ], + "angle": 0, + "content": "For the element grounding task:" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.369, + 0.88, + 0.397 + ], + "angle": 0, + "content": "In this website screenshot, I have labeled IDs for some HTML elements as candidates. Tell me which one best matches the description: " + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.41, + 0.861, + 0.439 + ], + "angle": 0, + "content": "You should directly tell me your choice in a single uppercase letter, and do not output any explanation or any other contents." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.457, + 0.315, + 0.472 + ], + "angle": 0, + "content": "For the action grounding task:" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.482, + 0.879, + 0.511 + ], + "angle": 0, + "content": "In this website screenshot, I have labeled IDs for some HTML elements as candidates. Tell me which one I should click to complete the following task: " + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.523, + 0.861, + 0.552 + ], + "angle": 0, + "content": "You should directly tell me your choice in a single uppercase letter, and do not output any explanation or any other contents." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.58, + 0.285, + 0.594 + ], + "angle": 0, + "content": "B.2.6 MM-Mind2Web" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.608, + 0.88, + 0.706 + ], + "angle": 0, + "content": "Imagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click on an element with the mouse, select an option, type text or press Enter with the keyboard. (For your understanding, they are like the click(), select_option(), type() functions in playwright respectively). One next step means one operation within the three." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.719, + 0.597, + 0.734 + ], + "angle": 0, + "content": "You are asked to complete the following task: " + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.747, + 0.262, + 0.76 + ], + "angle": 0, + "content": "Previous Actions:" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.761, + 0.272, + 0.774 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.775, + 0.517, + 0.788 + ], + "angle": 0, + "content": "The screenshot below shows the webpage you see." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.815, + 0.88, + 0.844 + ], + "angle": 0, + "content": "Follow the following guidance to think step by step before outlining the next action step at the current stage:" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.856, + 0.39, + 0.87 + ], + "angle": 0, + "content": "(Current Webpage Identification)" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.871, + 0.534, + 0.885 + ], + "angle": 0, + "content": "Firstly, think about what the current webpage is."
+ }, + { + "type": "text", + "bbox": [ + 0.116, + 0.898, + 0.339, + 0.913 + ], + "angle": 0, + "content": "(Previous Action Analysis)" + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.936, + 0.506, + 0.948 + ], + "angle": 0, + "content": "36" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.371, + 0.043, + 0.621, + 0.058 + ], + "angle": 0, + "content": "The Amazon Nova Family of Models" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.092, + 0.871, + 0.135 + ], + "angle": 0, + "content": "Secondly, combined with the screenshot, analyze each step of the previous action history and their intention one by one. Particularly, pay more attention to the last step, which may be more related to what you should do now as the next step." + }, + { + "type": "title", + "bbox": [ + 0.116, + 0.147, + 0.364, + 0.162 + ], + "angle": 0, + "content": "(Screenshot Details Analysis)" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.162, + 0.87, + 0.258 + ], + "angle": 0, + "content": "Closely examine the screenshot to check the status of every part of the webpage to understand what you can operate with and what has been set or completed. You should closely examine the screenshot details to see what steps have been completed by previous actions even though you are given the textual previous actions. Because the textual history may not clearly and sufficiently record some effects of previous actions, you should closely evaluate the status of every part of the webpage to understand what you have done." + }, + { + "type": "title", + "bbox": [ + 0.116, + 0.271, + 0.484, + 0.286 + ], + "angle": 0, + "content": "(Next Action Based on Webpage and Analysis)" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.286, + 0.879, + 0.342 + ], + "angle": 0, + "content": "Then, based on your analysis, in conjunction with human web browsing habits and the logic of web design, decide on the following action. And clearly outline which element in the webpage users will operate with as the first next target element, its detailed location, and the corresponding operation." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.354, + 0.662, + 0.368 + ], + "angle": 0, + "content": "To be successful, it is important to follow the following rules:" + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.369, + 0.713, + 0.382 + ], + "angle": 0, + "content": "1. You should only issue a valid action given the current observation." + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.383, + 0.509, + 0.395 + ], + "angle": 0, + "content": "2. You should only issue one action at a time." + }, + { + "type": "list", + "bbox": [ + 0.115, + 0.369, + 0.713, + 0.395 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.116, + 0.409, + 0.228, + 0.423 + ], + "angle": 0, + "content": "(Reiteration)" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.423, + 0.862, + 0.451 + ], + "angle": 0, + "content": "First, reiterate your next target element, its detailed location, and the corresponding operation." + }, + { + "type": "title", + "bbox": [ + 0.116, + 0.464, + 0.305, + 0.478 + ], + "angle": 0, + "content": "(Multichoice Question)" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.478, + 0.879, + 0.548 + ], + "angle": 0, + "content": "Below is a multi-choice question, where the choices are elements in the webpage. From the screenshot, find out where and what each one is on the webpage. Then, determine whether one matches your target element. Please examine the choices one by one. Choose the matching one. 
If multiple options match your answer, choose the most likely one by reexamining the screenshot, the choices, and your further reasoning." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.56, + 0.876, + 0.575 + ], + "angle": 0, + "content": "If none of these elements match your target element, please select option F." + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.575, + 0.568, + 0.588 + ], + "angle": 0, + "content": "None of the other options match the correct element." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.589, + 0.765, + 0.602 + ], + "angle": 0, + "content": ". None of the other options match the correct element." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.616, + 0.879, + 0.672 + ], + "angle": 0, + "content": "(Final Answer) Finally, conclude your answer using the format below. Ensure your answer is strictly adhering to the format provided below. Please do not leave any explanation in your answers of the final standardized format part, and this final part should be clear and certain. The element choice, action, and value should be in three separate lines." + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.685, + 0.177, + 0.698 + ], + "angle": 0, + "content": "Format:" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.712, + 0.501, + 0.727 + ], + "angle": 0, + "content": "ELEMENT: The uppercase letter of your choice." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.739, + 0.854, + 0.768 + ], + "angle": 0, + "content": "ACTION: Choose an action from {CLICK, TYPE, SELECT, NONE}. Use NONE only if you choose option F for the ELEMENT" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.781, + 0.525, + 0.795 + ], + "angle": 0, + "content": "VALUE: Provide additional input based on ACTION." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.809, + 0.253, + 0.822 + ], + "angle": 0, + "content": "The VALUE means:" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.823, + 0.526, + 0.837 + ], + "angle": 0, + "content": "If ACTION == TYPE, specify the text to be typed." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.837, + 0.568, + 0.85 + ], + "angle": 0, + "content": "If ACTION == SELECT, specify the option to be chosen." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.85, + 0.397, + 0.863 + ], + "angle": 0, + "content": "If ACTION == CLICK, write \"None\"." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.893, + 0.27, + 0.908 + ], + "angle": 0, + "content": "B.2.7 GroundUI-1K" + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.936, + 0.506, + 0.948 + ], + "angle": 0, + "content": "37" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.371, + 0.043, + 0.621, + 0.058 + ], + "angle": 0, + "content": "The Amazon Nova Family of Models" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.101, + 0.88, + 0.131 + ], + "angle": 0, + "content": "Which action should I do if I want to Click on and where is the action? Express the location coordinates using the (x1, y1, x2, y2) format, scaled between 0 and 1000." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.157, + 0.321, + 0.172 + ], + "angle": 0, + "content": "B.3 Functional Capabilities" + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.182, + 0.218, + 0.198 + ], + "angle": 0, + "content": "B.3.1 FinQA" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.208, + 0.88, + 0.265 + ], + "angle": 0, + "content": "Given the following finance question, analyze the question in detail step-by-step before giving the final answer. Your answer should begin with \"Lets think step-by-step\".
Your response should end with \"The answer is [the_final_answer]\", where [the_final_answer] should be the most concise answer without any explanation." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.406, + 0.884, + 0.449 + ], + "angle": 0, + "content": "We use regex \"The answer is (.*)\" to extract the answer. We convert answers with percent signs and magnitude terms to decimal numerical representation (e.g. convert \\(1.3\\%\\) to 0.013 and \"5.2 millions\" to 5,200,000). An answer is correct if it is identical to the ground truth when rounded to the same decimal places." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.462, + 0.206, + 0.476 + ], + "angle": 0, + "content": "B.3.2 RAG" + }, + { + "type": "code", + "bbox": [ + 0.114, + 0.489, + 0.88, + 0.808 + ], + "angle": 0, + "content": "You are a teacher grading a quiz. \nYou are given a question, the student's answer, and the true answer, and are asked to score the student answer as either Correct or Incorrect. \nExample Format: \nQUESTION: question here \nSTUDENT ANSWER: student's answer here \nTRUE ANSWER: true answer here \nGRADE: Correct or Incorrect here \nGrade the student answers based ONLY on their factual accuracy. Ignore differences in punctuation and phrasing between the student answer and true answer. It is OK if the student answer contains more information than the true answer, as long as it does not contain any conflicting statements. Begin! \nQUESTION: {query} \nSTUDENT ANSWER: {answer} \nTRUE ANSWER: {expected_answer} \nGRADE: \nYour response should be in json format as follows: \n{{ \"justification\": (Without mentioning the student/teacher framing of this prompt, explain why the STUDENT ANSWER is Correct or Incorrect. Use one or two sentences maximum. Keep the answer as concise as possible.), \"grade\": (correct or incorrect) \n}}" + }, + { + "type": "page_number", + "bbox": [ + 0.487, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "38" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.371, + 0.043, + 0.621, + 0.058 + ], + "angle": 0, + "content": "The Amazon Nova Family of Models" + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.09, + 0.551, + 0.109 + ], + "angle": 0, + "content": "C Qualitative examples of multimodal intelligence" + }, + { + "type": "image", + "bbox": [ + 0.137, + 0.14, + 0.159, + 0.157 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.167, + 0.141, + 0.257, + 0.158 + ], + "angle": 0, + "content": "User input" + }, + { + "type": "text", + "bbox": [ + 0.132, + 0.179, + 0.47, + 0.223 + ], + "angle": 0, + "content": "This looks like an amazing site. What is it and where is it located? Are there other fun things around this site to explore?"
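+ }, + { + "type": "text", + "content": "Returning to the FinQA scoring rule in B.3.1, a minimal Python sketch of the described extraction and normalization; the function names, the magnitude table, and the fixed rounding precision are simplifying assumptions rather than the report's own code:" + }, + { + "type": "code", + "content": "import re \n\n# Illustrative sketch of the B.3.1 scoring rule, not the evaluation code itself. \nMAGNITUDES = {'thousand': 1e3, 'million': 1e6, 'billion': 1e9} \n\ndef extract_answer(completion: str) -> str: \n    # The report extracts the final answer with the regex \"The answer is (.*)\". \n    match = re.search(r'The answer is (.*)', completion) \n    return match.group(1).strip() if match else '' \n\ndef normalize(answer: str) -> float: \n    # e.g. '1.3%' -> 0.013 and '5.2 millions' -> 5200000.0 \n    text = answer.lower().replace(',', '').strip().rstrip('.') \n    scale = 1.0 \n    if text.endswith('%'): \n        text, scale = text.rstrip('%'), 0.01 \n    for word, factor in MAGNITUDES.items(): \n        if word in text: \n            text, scale = text.replace(word + 's', '').replace(word, ''), factor \n    return float(re.sub(r'[^0-9.+-]', '', text)) * scale \n\ndef is_correct(pred: str, gold: str, places: int = 2) -> bool: \n    # The report rounds to the ground truth's decimal places; a fixed \n    # precision is used here for simplicity. \n    try: \n        return round(normalize(pred), places) == round(normalize(gold), places) \n    except ValueError: \n        return False"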
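+ }, + { + "type": "text", + "content": "The doubled braces in the B.3.2 grader template above suggest Python str.format-style filling; a minimal sketch of driving such an LLM judge and parsing its JSON verdict follows (the judge callable and function name are assumptions):" + }, + { + "type": "code", + "content": "import json \n\n# Illustrative sketch: fill the grader template, call a judge model, and read \n# the grade field from its JSON response. The judge callable is an assumption. \ndef grade_with_judge(judge, template: str, query: str, answer: str, expected: str) -> bool: \n    prompt = template.format(query=query, answer=answer, expected_answer=expected) \n    raw = judge(prompt)  # assumed: returns the judge model's text output \n    verdict = json.loads(raw)  # expects {\"justification\": ..., \"grade\": ...} \n    return verdict['grade'].strip().lower() == 'correct'"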
+ }, + { + "type": "image", + "bbox": [ + 0.176, + 0.226, + 0.434, + 0.475 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.522, + 0.14, + 0.544, + 0.157 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.55, + 0.141, + 0.631, + 0.158 + ], + "angle": 0, + "content": "Response" + }, + { + "type": "text", + "bbox": [ + 0.52, + 0.179, + 0.849, + 0.281 + ], + "angle": 0, + "content": "This is the Space Needle, a landmark in Seattle, Washington, USA. It is a well-known observation tower and a symbol of Seattle. It was built for the 1962 World's Fair and has since become a popular tourist attraction. The Space Needle offers panoramic views of the city, the surrounding mountains, and the Puget Sound." + }, + { + "type": "text", + "bbox": [ + 0.52, + 0.282, + 0.86, + 0.471 + ], + "angle": 0, + "content": "The Space Needle is located in the Seattle Center, which is a cultural and entertainment hub. The Seattle Center is home to many other attractions, including the Chihuly Garden and Glass, the Museum of Pop Culture, and the Pacific Science Center. The Seattle Center also hosts events like concerts, festivals, and art exhibitions. The Seattle Center is also close to other popular attractions in Seattle, such as the Pike Place Market, the Seattle Aquarium, and the Seattle Art Museum. The city is known for its vibrant arts and culture scene, outdoor activities, and beautiful natural surroundings." + }, + { + "type": "image_caption", + "bbox": [ + 0.281, + 0.486, + 0.715, + 0.502 + ], + "angle": 0, + "content": "Figure 6: Created with Nova Pro. Photo taken by a team member." + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.936, + 0.506, + 0.948 + ], + "angle": 0, + "content": "39" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.372, + 0.044, + 0.621, + 0.058 + ], + "angle": 0, + "content": "The Amazon Nova Family of Models" + }, + { + "type": "title", + "bbox": [ + 0.131, + 0.156, + 0.543, + 0.171 + ], + "angle": 0, + "content": "User input: \"Search for Psych and buy the first episode\"" + }, + { + "type": "title", + "bbox": [ + 0.123, + 0.192, + 0.21, + 0.204 + ], + "angle": 0, + "content": "Agent step 1" + }, + { + "type": "image", + "bbox": [ + 0.117, + 0.207, + 0.495, + 0.5 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.192, + 0.602, + 0.204 + ], + "angle": 0, + "content": "Agent step 2" + }, + { + "type": "image", + "bbox": [ + 0.507, + 0.207, + 0.885, + 0.499 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.123, + 0.523, + 0.212, + 0.535 + ], + "angle": 0, + "content": "Agent step 3" + }, + { + "type": "image", + "bbox": [ + 0.118, + 0.538, + 0.495, + 0.83 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.523, + 0.604, + 0.535 + ], + "angle": 0, + "content": "Agent step 4" + }, + { + "type": "image", + "bbox": [ + 0.507, + 0.538, + 0.885, + 0.83 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.36, + 0.836, + 0.638, + 0.851 + ], + "angle": 0, + "content": "Figure 7: Example of a multimodal agent." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.936, + 0.506, + 0.948 + ], + "angle": 0, + "content": "40" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.372, + 0.044, + 0.621, + 0.058 + ], + "angle": 0, + "content": "The Amazon Nova Family of Models" + }, + { + "type": "image", + "bbox": [ + 0.137, + 0.333, + 0.157, + 0.35 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.167, + 0.334, + 0.254, + 0.35 + ], + "angle": 0, + "content": "User input" + }, + { + "type": "text", + "bbox": [ + 0.133, + 0.371, + 0.31, + 0.399 + ], + "angle": 0, + "content": "What happened to my tire? What should I do?" + }, + { + "type": "image", + "bbox": [ + 0.159, + 0.408, + 0.387, + 0.641 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.46, + 0.334, + 0.48, + 0.35 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.486, + 0.335, + 0.565, + 0.351 + ], + "angle": 0, + "content": "Response" + }, + { + "type": "text", + "bbox": [ + 0.459, + 0.372, + 0.831, + 0.413 + ], + "angle": 0, + "content": "It appears that your tire has a significant puncture, as evidenced by the deflated state and visible damage. Here's what you should do:" + }, + { + "type": "text", + "bbox": [ + 0.459, + 0.415, + 0.845, + 0.442 + ], + "angle": 0, + "content": "1. Assess the Damage: Confirm the puncture by checking for any visible holes or debris embedded in the tire." + }, + { + "type": "text", + "bbox": [ + 0.459, + 0.444, + 0.851, + 0.457 + ], + "angle": 0, + "content": "2. Check Tire Pressure: Ensure the tire is completely deflated." + }, + { + "type": "text", + "bbox": [ + 0.459, + 0.457, + 0.855, + 0.485 + ], + "angle": 0, + "content": "3. Remove the Tire: If you're comfortable, you can remove the wheel to inspect the puncture more closely." + }, + { + "type": "text", + "bbox": [ + 0.459, + 0.486, + 0.843, + 0.514 + ], + "angle": 0, + "content": "4. Temporary Fix: Use a tire repair kit to patch the hole if it's small. This is a temporary solution." + }, + { + "type": "text", + "bbox": [ + 0.459, + 0.515, + 0.856, + 0.556 + ], + "angle": 0, + "content": "5. Professional Repair: Visit a tire shop for a proper repair or replacement. They can assess if the tire can be patched or if a new tire is needed." + }, + { + "type": "text", + "bbox": [ + 0.459, + 0.558, + 0.841, + 0.586 + ], + "angle": 0, + "content": "6. Inspect Other Tires: Check the other tires for any signs of damage or wear." + }, + { + "type": "list", + "bbox": [ + 0.459, + 0.415, + 0.856, + 0.586 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.459, + 0.587, + 0.822, + 0.614 + ], + "angle": 0, + "content": "Would you like more detailed instructions on any of these steps?" + }, + { + "type": "image_caption", + "bbox": [ + 0.321, + 0.66, + 0.677, + 0.675 + ], + "angle": 0, + "content": "Figure 8: Created with Nova Pro. Image source: [88]." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.936, + 0.504, + 0.948 + ], + "angle": 0, + "content": "41" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.372, + 0.043, + 0.621, + 0.058 + ], + "angle": 0, + "content": "The Amazon Nova Family of Models" + }, + { + "type": "image", + "bbox": [ + 0.137, + 0.101, + 0.159, + 0.119 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.167, + 0.102, + 0.258, + 0.119 + ], + "angle": 0, + "content": "User input" + }, + { + "type": "text", + "bbox": [ + 0.132, + 0.14, + 0.454, + 0.17 + ], + "angle": 0, + "content": "Group this sales data by month using pandas and then create a bar chart of the sales per month." + }, + { + "type": "table", + "bbox": [ + 0.22, + 0.175, + 0.391, + 0.351 + ], + "angle": 0, + "content": "
<table><tr><td>Month</td><td>Sales</td></tr><tr><td>Jan</td><td>1523.13</td></tr><tr><td>Jan</td><td>957</td></tr><tr><td>Feb</td><td>133</td></tr><tr><td>March</td><td>452</td></tr><tr><td>May</td><td>521</td></tr><tr><td>June</td><td>901</td></tr><tr><td>July</td><td>1334</td></tr><tr><td>July</td><td>131</td></tr><tr><td>Aug</td><td>2601</td></tr><tr><td>Dec</td><td>3005</td></tr></table>
" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.371, + 0.278, + 0.386 + ], + "angle": 0, + "content": "Generated plot:" + }, + { + "type": "image", + "bbox": [ + 0.159, + 0.39, + 0.45, + 0.531 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.523, + 0.102, + 0.544, + 0.118 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.55, + 0.103, + 0.632, + 0.119 + ], + "angle": 0, + "content": "Response" + }, + { + "type": "code", + "bbox": [ + 0.52, + 0.137, + 0.866, + 0.445 + ], + "angle": 0, + "content": "import pandas as pd \nimport matplotlib.pyplot as plt \n# Data \ndata \\(=\\) { Month':['Jan','Jan','Feb','March','May','June','July' July','Aug','Dec'], Sales':[1523.13,957,133,452,521,901,1334,131,2601,3005] } \n# Create DataFrame df \\(=\\) pd.DataFrame(data) \n# Group by month and sum sales monthly_sales \\(=\\) df.groupby('Month')[Sales'].sum().reset_index() \n# Sort months based on calendar monthly_sales['Month'] \\(=\\) pd.Categorical( monthly_sales['Month'], categories \\(\\coloneqq\\) ['Jan','Feb','March','April','May','June','July', Aug,'Sept','Oct','Nov','Dec'], ordered=True \nmonthsales \\(=\\) monthly_sales.sort_values('Month') \n# Plot bar chart plt.figure(figsize=(10,6)) plt.bar(monthly_sales['Month'], monthly_sales['Sales'], color \\(\\equiv\\) 'blue') plt.xlabel('Month') pltylabel('Total Sales') plt.title('Total Sales per Month') plt.show()" + }, + { + "type": "image_caption", + "bbox": [ + 0.387, + 0.54, + 0.61, + 0.555 + ], + "angle": 0, + "content": "Figure 9: Created with Nova Pro." + }, + { + "type": "image", + "bbox": [ + 0.137, + 0.577, + 0.159, + 0.595 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.167, + 0.578, + 0.257, + 0.595 + ], + "angle": 0, + "content": "User input" + }, + { + "type": "text", + "bbox": [ + 0.132, + 0.616, + 0.411, + 0.646 + ], + "angle": 0, + "content": "How many people say the European Union responded well to the pandemic?" + }, + { + "type": "title", + "bbox": [ + 0.178, + 0.668, + 0.413, + 0.693 + ], + "angle": 0, + "content": "Poor marks for America's response to coronavirus outbreak" + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.696, + 0.404, + 0.716 + ], + "angle": 0, + "content": "% who say each has done a job dealing with the coronavirus outbreak" + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.72, + 0.425, + 0.804 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.808, + 0.424, + 0.851 + ], + "angle": 0, + "content": "Note: Percentages are medians based on 13 countries. Those who did not answer are not shown. Source: Summer 2020 Global Attitudes Survey. Q10a-e. U.S. Image Plummets Internationally as Most Say Country Has Handled Coronavirus Badly" + }, + { + "type": "text", + "bbox": [ + 0.179, + 0.853, + 0.274, + 0.862 + ], + "angle": 0, + "content": "PEW RESEARCH CENTER" + }, + { + "type": "image", + "bbox": [ + 0.523, + 0.578, + 0.544, + 0.595 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.55, + 0.579, + 0.632, + 0.596 + ], + "angle": 0, + "content": "Response" + }, + { + "type": "text", + "bbox": [ + 0.52, + 0.616, + 0.861, + 0.718 + ], + "angle": 0, + "content": "The chart shows that \\(57\\%\\) of people say the European Union responded well to the pandemic. This is indicated by the green bar labeled \"Good\" for the EU, which extends to a height of \\(57\\%\\). 
This suggests that a majority of the people surveyed had a positive view of the EU's response to the pandemic." + }, + { + "type": "image_caption", + "bbox": [ + 0.258, + 0.893, + 0.739, + 0.909 + ], + "angle": 0, + "content": "Figure 10: Created with Nova Lite. Image source: ChartQA dataset [50]." + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.936, + 0.506, + 0.948 + ], + "angle": 0, + "content": "42" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.371, + 0.043, + 0.621, + 0.058 + ], + "angle": 0, + "content": "The Amazon Nova Family of Models" + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.09, + 0.44, + 0.108 + ], + "angle": 0, + "content": "D Correspondence and Contributors" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.121, + 0.355, + 0.135 + ], + "angle": 0, + "content": "Please direct all correspondence to:" + }, + { + "type": "text", + "bbox": [ + 0.344, + 0.151, + 0.654, + 0.167 + ], + "angle": 0, + "content": "nova-technical-report@amazon.com" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.182, + 0.885, + 0.21 + ], + "angle": 0, + "content": "The Nova family of models was built by the Amazon Artificial General Intelligence (AGI) organization and partner teams." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.216, + 0.803, + 0.233 + ], + "angle": 0, + "content": "When citing this report, please use \"Amazon AGI\" as the sole author, as shown in the bibtex entry below." + }, + { + "type": "code", + "bbox": [ + 0.114, + 0.239, + 0.816, + 0.337 + ], + "angle": 0, + "content": "@misc{novatechreport,\n  author = {Amazon AGI},\n  title = {The Amazon Nova Family of Models: Technical Report and Model Card},\n  year = {2024},\n  url = {https://www.amazon.science/publications/the-amazon-nova-family-of-models-technical-report-and-model-card}\n}" + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.365, + 0.251, + 0.379 + ], + "angle": 0, + "content": "D.1 Contributors" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.39, + 0.885, + 0.419 + ], + "angle": 0, + "content": "The following individuals worked in the Nova program for at least one-fifth of its duration and measurably impacted one or more of the models or services described in this report."
+ }, + { + "type": "text", + "bbox": [ + 0.114, + 0.433, + 0.224, + 0.447 + ], + "angle": 0, + "content": "Aaron Langford" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.448, + 0.204, + 0.46 + ], + "angle": 0, + "content": "Aayush Shah" + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.462, + 0.227, + 0.475 + ], + "angle": 0, + "content": "Abhanshu Gupta" + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.476, + 0.246, + 0.488 + ], + "angle": 0, + "content": "Abhimanyu Bhatter" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.489, + 0.217, + 0.502 + ], + "angle": 0, + "content": "Abhinav Goyal" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.503, + 0.226, + 0.515 + ], + "angle": 0, + "content": "Abhinav Mathur" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.517, + 0.236, + 0.53 + ], + "angle": 0, + "content": "Abhinav Mohanty" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.531, + 0.23, + 0.542 + ], + "angle": 0, + "content": "Abhishek Kumar" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.544, + 0.217, + 0.555 + ], + "angle": 0, + "content": "Abhishek Sethi" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.558, + 0.198, + 0.57 + ], + "angle": 0, + "content": "Abi Komma" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.572, + 0.194, + 0.583 + ], + "angle": 0, + "content": "Abner Pena" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.585, + 0.187, + 0.597 + ], + "angle": 0, + "content": "Achin Jain" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.6, + 0.211, + 0.612 + ], + "angle": 0, + "content": "Adam Kunysz" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.613, + 0.221, + 0.626 + ], + "angle": 0, + "content": "Adam Opyrchal" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.627, + 0.206, + 0.64 + ], + "angle": 0, + "content": "Adarsh Singh" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.641, + 0.207, + 0.653 + ], + "angle": 0, + "content": "Aditya Rawal" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.654, + 0.3, + 0.666 + ], + "angle": 0, + "content": "Adok Achar Budihal Prasad" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.668, + 0.226, + 0.68 + ], + "angle": 0, + "content": "Adrià de Gispert" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.682, + 0.214, + 0.695 + ], + "angle": 0, + "content": "Agnika Kumar" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.696, + 0.26, + 0.709 + ], + "angle": 0, + "content": "Aishwarya Aryamane" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.71, + 0.182, + 0.723 + ], + "angle": 0, + "content": "Ajay Nair" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.724, + 0.18, + 0.735 + ], + "angle": 0, + "content": "Akilan M" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.738, + 0.228, + 0.75 + ], + "angle": 0, + "content": "Akshaya Iyengar" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.751, + 0.353, + 0.764 + ], + "angle": 0, + "content": "Akshaya Vishnu Kudlu Shanbhogue" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.765, + 0.172, + 0.776 + ], + "angle": 0, + "content": "Alan He" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.779, + 0.249, + 0.79 + ], + "angle": 0, + "content": "Alessandra Cervone" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.792, + 0.187, + 0.803 + ], + "angle": 0, + "content": "Alex Loeb" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.805, + 0.195, + 0.818 + ], + "angle": 0, + "content": "Alex Zhang" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.82, + 0.206, + 0.831 + ], + "angle": 0, + "content": "Alexander Fu" + }, + { + 
"type": "text", + "bbox": [ + 0.116, + 0.833, + 0.269, + 0.845 + ], + "angle": 0, + "content": "Alexander Lisnichenko" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.847, + 0.227, + 0.861 + ], + "angle": 0, + "content": "Alexander Zhipa" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.862, + 0.272, + 0.873 + ], + "angle": 0, + "content": "Alexandros Potamianos" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.875, + 0.227, + 0.888 + ], + "angle": 0, + "content": "Ali Kebarighotbi" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.889, + 0.258, + 0.9 + ], + "angle": 0, + "content": "Aliakbar Daronkolaei" + }, + { + "type": "text", + "bbox": [ + 0.375, + 0.433, + 0.471, + 0.446 + ], + "angle": 0, + "content": "Alok Parmesh" + }, + { + "type": "text", + "bbox": [ + 0.375, + 0.447, + 0.517, + 0.46 + ], + "angle": 0, + "content": "Amanjot Kaur Samra" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.462, + 0.464, + 0.473 + ], + "angle": 0, + "content": "Ameen Khan" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.475, + 0.444, + 0.487 + ], + "angle": 0, + "content": "Amer Rez" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.489, + 0.46, + 0.5 + ], + "angle": 0, + "content": "Amir Saffari" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.503, + 0.482, + 0.515 + ], + "angle": 0, + "content": "Amit Agarwalla" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.517, + 0.461, + 0.528 + ], + "angle": 0, + "content": "Amit Jhindal" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.53, + 0.49, + 0.542 + ], + "angle": 0, + "content": "Amith Mamidala" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.544, + 0.475, + 0.555 + ], + "angle": 0, + "content": "Ammar Asmro" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.558, + 0.491, + 0.57 + ], + "angle": 0, + "content": "Amulya Ballakur" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.572, + 0.472, + 0.583 + ], + "angle": 0, + "content": "Anand Mishra" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.585, + 0.488, + 0.597 + ], + "angle": 0, + "content": "Anand Sridharan" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.6, + 0.51, + 0.611 + ], + "angle": 0, + "content": "Anastasiia Dubinina" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.613, + 0.456, + 0.625 + ], + "angle": 0, + "content": "Andre Lenz" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.627, + 0.474, + 0.638 + ], + "angle": 0, + "content": "Andreas Doerr" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.641, + 0.484, + 0.654 + ], + "angle": 0, + "content": "Andrew Keating" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.655, + 0.478, + 0.666 + ], + "angle": 0, + "content": "Andrew Leaver" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.668, + 0.473, + 0.68 + ], + "angle": 0, + "content": "Andrew Smith" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.681, + 0.471, + 0.693 + ], + "angle": 0, + "content": "Andrew Wirth" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.696, + 0.459, + 0.708 + ], + "angle": 0, + "content": "Andy Davey" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.71, + 0.495, + 0.722 + ], + "angle": 0, + "content": "Andy Rosenbaum" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.723, + 0.451, + 0.735 + ], + "angle": 0, + "content": "Andy Sohn" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.737, + 0.463, + 0.75 + ], + "angle": 0, + "content": "Angela Chan" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.751, + 0.503, + 0.763 + ], + "angle": 0, + "content": "Aniket Chakrabarti" + }, + { + "type": 
"text", + "bbox": [ + 0.376, + 0.764, + 0.497, + 0.776 + ], + "angle": 0, + "content": "Anil Ramakrishna" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.779, + 0.461, + 0.791 + ], + "angle": 0, + "content": "Anirban Roy" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.793, + 0.444, + 0.805 + ], + "angle": 0, + "content": "Anita Iyer" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.806, + 0.517, + 0.818 + ], + "angle": 0, + "content": "Anjali Narayan-Chen" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.82, + 0.468, + 0.831 + ], + "angle": 0, + "content": "Ankith Yennu" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.833, + 0.491, + 0.845 + ], + "angle": 0, + "content": "Anna Dabrowska" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.847, + 0.494, + 0.859 + ], + "angle": 0, + "content": "Anna Gawlowska" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.861, + 0.49, + 0.874 + ], + "angle": 0, + "content": "Anna Rumshisky" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.875, + 0.455, + 0.886 + ], + "angle": 0, + "content": "Anna Turek" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.888, + 0.472, + 0.902 + ], + "angle": 0, + "content": "Anoop Deoras" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.433, + 0.758, + 0.446 + ], + "angle": 0, + "content": "Anton Bezruchkin" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.447, + 0.722, + 0.46 + ], + "angle": 0, + "content": "Anup Prasad" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.462, + 0.744, + 0.475 + ], + "angle": 0, + "content": "Anupam Dewan" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.476, + 0.728, + 0.487 + ], + "angle": 0, + "content": "Anwith Kiran" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.489, + 0.732, + 0.502 + ], + "angle": 0, + "content": "Apoory Gupta" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.503, + 0.736, + 0.516 + ], + "angle": 0, + "content": "Aram Galstyan" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.517, + 0.769, + 0.529 + ], + "angle": 0, + "content": "Aravind Manoharan" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.53, + 0.724, + 0.543 + ], + "angle": 0, + "content": "Arijit Biswas" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.544, + 0.749, + 0.556 + ], + "angle": 0, + "content": "Arindam Mandal" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.558, + 0.716, + 0.57 + ], + "angle": 0, + "content": "Arpit Gupta" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.572, + 0.761, + 0.584 + ], + "angle": 0, + "content": "Arsamkhan Pathan" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.585, + 0.742, + 0.599 + ], + "angle": 0, + "content": "Arun Nagarajan" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.6, + 0.782, + 0.612 + ], + "angle": 0, + "content": "Arushan Rajasekaram" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.613, + 0.774, + 0.625 + ], + "angle": 0, + "content": "Arvind Sundararajan" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.627, + 0.748, + 0.638 + ], + "angle": 0, + "content": "Ashwin Ganesan" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.64, + 0.781, + 0.653 + ], + "angle": 0, + "content": "Ashwin Swaminathan" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.654, + 0.788, + 0.666 + ], + "angle": 0, + "content": "Athanasios Mouchtaris" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.668, + 0.76, + 0.681 + ], + "angle": 0, + "content": "Audrey Champeau" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.682, + 0.7, + 0.695 + ], + "angle": 0, + "content": "Avik Ray" + }, + { + 
"type": "text", + "bbox": [ + 0.636, + 0.696, + 0.731, + 0.708 + ], + "angle": 0, + "content": "Ayush Jaiswal" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.71, + 0.733, + 0.722 + ], + "angle": 0, + "content": "Ayush Sharma" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.723, + 0.729, + 0.735 + ], + "angle": 0, + "content": "Bailey Keefer" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.737, + 0.785, + 0.75 + ], + "angle": 0, + "content": "Balamurugan Muthiah" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.751, + 0.772, + 0.763 + ], + "angle": 0, + "content": "Beatrix Leon-Millan" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.764, + 0.732, + 0.777 + ], + "angle": 0, + "content": "Ben Koopman" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.779, + 0.683, + 0.79 + ], + "angle": 0, + "content": "Ben Li" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.792, + 0.744, + 0.805 + ], + "angle": 0, + "content": "Benjamin Biggs" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.806, + 0.726, + 0.818 + ], + "angle": 0, + "content": "Benjamin Ott" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.82, + 0.753, + 0.831 + ], + "angle": 0, + "content": "Bhanu Vinzamuri" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.833, + 0.761, + 0.845 + ], + "angle": 0, + "content": "Bharath Venkatesh" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.847, + 0.747, + 0.859 + ], + "angle": 0, + "content": "Bhavana Ganesh" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.861, + 0.743, + 0.872 + ], + "angle": 0, + "content": "Bhoomit Vasani" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.875, + 0.707, + 0.887 + ], + "angle": 0, + "content": "Bill Byrne" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.888, + 0.694, + 0.9 + ], + "angle": 0, + "content": "Bill Hsu" + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "43" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.372, + 0.043, + 0.621, + 0.058 + ], + "angle": 0, + "content": "The Amazon Nova Family of Models" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.091, + 0.223, + 0.107 + ], + "angle": 0, + "content": "Bincheng Wang" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.107, + 0.192, + 0.12 + ], + "angle": 0, + "content": "Blake King" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.121, + 0.205, + 0.134 + ], + "angle": 0, + "content": "Blazej Gorny" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.135, + 0.174, + 0.148 + ], + "angle": 0, + "content": "Bo Feng" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.148, + 0.183, + 0.161 + ], + "angle": 0, + "content": "Bo Zheng" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.162, + 0.232, + 0.174 + ], + "angle": 0, + "content": "Bodhisattwa Paul" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.176, + 0.187, + 0.187 + ], + "angle": 0, + "content": "Bofan Sun" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.189, + 0.196, + 0.202 + ], + "angle": 0, + "content": "Bofeng Luo" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.203, + 0.202, + 0.215 + ], + "angle": 0, + "content": "Bowen Chen" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.217, + 0.191, + 0.228 + ], + "angle": 0, + "content": "Bowen Xie" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.23, + 0.174, + 0.244 + ], + "angle": 0, + "content": "Boya Yu" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.245, + 0.214, + 0.257 + ], + "angle": 0, + "content": "Brendan Jugan" + }, + { + "type": "text", + "bbox": [ + 
0.117, + 0.258, + 0.201, + 0.27 + ], + "angle": 0, + "content": "Brett Panosh" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.272, + 0.205, + 0.284 + ], + "angle": 0, + "content": "Brian Collins" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.286, + 0.227, + 0.299 + ], + "angle": 0, + "content": "Brian Thompson" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.3, + 0.202, + 0.312 + ], + "angle": 0, + "content": "Can Karakus" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.314, + 0.171, + 0.325 + ], + "angle": 0, + "content": "Can Liu" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.327, + 0.22, + 0.339 + ], + "angle": 0, + "content": "Carl Lambrecht" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.341, + 0.18, + 0.353 + ], + "angle": 0, + "content": "Carly Lin" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.355, + 0.211, + 0.368 + ], + "angle": 0, + "content": "Carolyn Wang" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.369, + 0.195, + 0.381 + ], + "angle": 0, + "content": "Carrie Yuan" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.383, + 0.203, + 0.395 + ], + "angle": 0, + "content": "Casey Loyda" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.396, + 0.223, + 0.409 + ], + "angle": 0, + "content": "Cezary Walczak" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.41, + 0.243, + 0.424 + ], + "angle": 0, + "content": "Chalapathi Choppa" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.424, + 0.279, + 0.436 + ], + "angle": 0, + "content": "Chandana Satya Prakash" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.437, + 0.274, + 0.45 + ], + "angle": 0, + "content": "Chankrisna Richy Meas" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.452, + 0.203, + 0.464 + ], + "angle": 0, + "content": "Charith Peris" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.465, + 0.225, + 0.477 + ], + "angle": 0, + "content": "Charles Recaido" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.479, + 0.189, + 0.49 + ], + "angle": 0, + "content": "Charlie Xu" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.492, + 0.215, + 0.504 + ], + "angle": 0, + "content": "Charul Sharma" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.506, + 0.209, + 0.518 + ], + "angle": 0, + "content": "Chase Kernan" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.52, + 0.248, + 0.533 + ], + "angle": 0, + "content": "Chayut Thanapirom" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.534, + 0.205, + 0.546 + ], + "angle": 0, + "content": "Chengwei Su" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.548, + 0.199, + 0.559 + ], + "angle": 0, + "content": "Chenhao Xu" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.562, + 0.203, + 0.573 + ], + "angle": 0, + "content": "Chenhao Yin" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.575, + 0.194, + 0.586 + ], + "angle": 0, + "content": "Chentao Ye" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.589, + 0.211, + 0.602 + ], + "angle": 0, + "content": "Chenyang Tao" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.603, + 0.27, + 0.615 + ], + "angle": 0, + "content": "Chethan Parameshwara" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.616, + 0.236, + 0.63 + ], + "angle": 0, + "content": "Ching-Yun Chang" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.631, + 0.179, + 0.643 + ], + "angle": 0, + "content": "Chong Li" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.645, + 0.199, + 0.656 + ], + "angle": 0, + "content": "Chris Hench" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.658, + 0.187, + 
0.669 + ], + "angle": 0, + "content": "Chris Tran" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.672, + 0.237, + 0.685 + ], + "angle": 0, + "content": "Christophe Dupuy" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.686, + 0.236, + 0.699 + ], + "angle": 0, + "content": "Christopher Davis" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.7, + 0.256, + 0.712 + ], + "angle": 0, + "content": "Christopher DiPersio" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.713, + 0.304, + 0.726 + ], + "angle": 0, + "content": "Christos Christodoulopoulos" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.727, + 0.184, + 0.739 + ], + "angle": 0, + "content": "Christy Li" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.741, + 0.191, + 0.752 + ], + "angle": 0, + "content": "Chun Chen" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.754, + 0.241, + 0.766 + ], + "angle": 0, + "content": "Claudio Delli Bovi" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.768, + 0.221, + 0.782 + ], + "angle": 0, + "content": "Clement Chung" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.782, + 0.21, + 0.794 + ], + "angle": 0, + "content": "Cole Hawkins" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.796, + 0.211, + 0.807 + ], + "angle": 0, + "content": "Connor Harris" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.81, + 0.204, + 0.822 + ], + "angle": 0, + "content": "Corey Ropell" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.823, + 0.193, + 0.836 + ], + "angle": 0, + "content": "Cynthia He" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.837, + 0.168, + 0.848 + ], + "angle": 0, + "content": "DK Joo" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.851, + 0.225, + 0.864 + ], + "angle": 0, + "content": "Dae Yon Hwang" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.865, + 0.19, + 0.876 + ], + "angle": 0, + "content": "Dan Rosen" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.878, + 0.209, + 0.89 + ], + "angle": 0, + "content": "Daniel Elkind" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.892, + 0.211, + 0.904 + ], + "angle": 0, + "content": "Daniel Pressel" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.092, + 0.468, + 0.107 + ], + "angle": 0, + "content": "Daniel Zhang" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.107, + 0.49, + 0.119 + ], + "angle": 0, + "content": "Danielle Kimball" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.12, + 0.474, + 0.132 + ], + "angle": 0, + "content": "Daniil Sorokin" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.134, + 0.468, + 0.146 + ], + "angle": 0, + "content": "Dave Goodell" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.148, + 0.48, + 0.159 + ], + "angle": 0, + "content": "Davide Modolo" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.161, + 0.451, + 0.173 + ], + "angle": 0, + "content": "Dawei Zhu" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.175, + 0.488, + 0.188 + ], + "angle": 0, + "content": "Deepikaa Suresh" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.189, + 0.467, + 0.202 + ], + "angle": 0, + "content": "Deepti Raga" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.203, + 0.487, + 0.215 + ], + "angle": 0, + "content": "Denis Filimonov" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.217, + 0.484, + 0.228 + ], + "angle": 0, + "content": "Denis Foo Kune" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.231, + 0.564, + 0.244 + ], + "angle": 0, + "content": "Denis Romasanta Rodriguez" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.245, + 0.519, + 
0.257 + ], + "angle": 0, + "content": "Devamanyu Hazarika" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.258, + 0.483, + 0.271 + ], + "angle": 0, + "content": "Dhananjay Ram" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.272, + 0.475, + 0.284 + ], + "angle": 0, + "content": "Dhawal Parkar" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.286, + 0.465, + 0.298 + ], + "angle": 0, + "content": "Dhawal Patel" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.3, + 0.475, + 0.311 + ], + "angle": 0, + "content": "Dhwanil Desai" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.314, + 0.514, + 0.327 + ], + "angle": 0, + "content": "Dinesh Singh Rajput" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.328, + 0.45, + 0.339 + ], + "angle": 0, + "content": "Disha Sule" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.341, + 0.476, + 0.354 + ], + "angle": 0, + "content": "Diwakar Singh" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.355, + 0.479, + 0.367 + ], + "angle": 0, + "content": "Dmitriy Genzel" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.369, + 0.496, + 0.381 + ], + "angle": 0, + "content": "Dolly Goldenberg" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.383, + 0.45, + 0.395 + ], + "angle": 0, + "content": "Dongyi He" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.396, + 0.484, + 0.408 + ], + "angle": 0, + "content": "Dumitru Hanciu" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.41, + 0.487, + 0.422 + ], + "angle": 0, + "content": "Dushan Tharmal" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.424, + 0.509, + 0.436 + ], + "angle": 0, + "content": "Dzmitry Siankovich" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.437, + 0.456, + 0.449 + ], + "angle": 0, + "content": "Edi Cikovic" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.452, + 0.485, + 0.463 + ], + "angle": 0, + "content": "Edwin Abraham" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.465, + 0.468, + 0.477 + ], + "angle": 0, + "content": "Ekraam Sabir" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.479, + 0.462, + 0.49 + ], + "angle": 0, + "content": "Elliott Olson" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.492, + 0.477, + 0.504 + ], + "angle": 0, + "content": "Emmett Steven" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.506, + 0.453, + 0.518 + ], + "angle": 0, + "content": "Emre Barut" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.52, + 0.461, + 0.532 + ], + "angle": 0, + "content": "Eric Jackson" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.534, + 0.442, + 0.546 + ], + "angle": 0, + "content": "Ethan Wu" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.548, + 0.462, + 0.561 + ], + "angle": 0, + "content": "Evelyn Chen" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.562, + 0.514, + 0.574 + ], + "angle": 0, + "content": "Ezhilan Mahalingam" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.575, + 0.504, + 0.587 + ], + "angle": 0, + "content": "Fabian Triefenbach" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.589, + 0.44, + 0.602 + ], + "angle": 0, + "content": "Fan Yang" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.603, + 0.453, + 0.616 + ], + "angle": 0, + "content": "Fangyu Liu" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.617, + 0.441, + 0.628 + ], + "angle": 0, + "content": "Fanzi Wu" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.63, + 0.474, + 0.642 + ], + "angle": 0, + "content": "Faraz Tavakoli" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.644, + 0.502, + 0.656 + ], + "angle": 0, + 
"content": "Farhad Khozeimeh" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.658, + 0.459, + 0.671 + ], + "angle": 0, + "content": "Feiyang Niu" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.672, + 0.46, + 0.684 + ], + "angle": 0, + "content": "Felix Hieber" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.686, + 0.429, + 0.698 + ], + "angle": 0, + "content": "Feng Li" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.7, + 0.451, + 0.712 + ], + "angle": 0, + "content": "First Elbey" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.713, + 0.468, + 0.725 + ], + "angle": 0, + "content": "Florian Krebs" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.727, + 0.468, + 0.74 + ], + "angle": 0, + "content": "Florian Saupe" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.741, + 0.491, + 0.754 + ], + "angle": 0, + "content": "Florian Sprunken" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.755, + 0.444, + 0.766 + ], + "angle": 0, + "content": "Frank Fan" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.768, + 0.463, + 0.781 + ], + "angle": 0, + "content": "Furqan Khan" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.782, + 0.523, + 0.794 + ], + "angle": 0, + "content": "Gabriela De Vincenzo" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.795, + 0.491, + 0.809 + ], + "angle": 0, + "content": "Gagandeep Kang" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.81, + 0.462, + 0.822 + ], + "angle": 0, + "content": "George Ding" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.823, + 0.449, + 0.836 + ], + "angle": 0, + "content": "George He" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.837, + 0.472, + 0.85 + ], + "angle": 0, + "content": "George Yeung" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.851, + 0.493, + 0.863 + ], + "angle": 0, + "content": "Ghada Qaddoumi" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.865, + 0.531, + 0.876 + ], + "angle": 0, + "content": "Giannis Karamanolakis" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.878, + 0.502, + 0.891 + ], + "angle": 0, + "content": "Goeric Huybrechts" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.892, + 0.477, + 0.904 + ], + "angle": 0, + "content": "Gokul Maddali" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.092, + 0.747, + 0.106 + ], + "angle": 0, + "content": "Gonzalo Iglesias" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.107, + 0.754, + 0.119 + ], + "angle": 0, + "content": "Gordon McShane" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.12, + 0.722, + 0.132 + ], + "angle": 0, + "content": "Gozde Sahin" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.134, + 0.746, + 0.147 + ], + "angle": 0, + "content": "Guangtai Huang" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.148, + 0.749, + 0.161 + ], + "angle": 0, + "content": "Gukyeong Kwon" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.162, + 0.784, + 0.175 + ], + "angle": 0, + "content": "Gunnar A. 
Sigurdsson" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.176, + 0.75, + 0.188 + ], + "angle": 0, + "content": "Gurpreet Chadha" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.189, + 0.74, + 0.203 + ], + "angle": 0, + "content": "Gururaj Kosuru" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.204, + 0.756, + 0.216 + ], + "angle": 0, + "content": "Hagen Fuerstenau" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.217, + 0.697, + 0.228 + ], + "angle": 0, + "content": "Hah Hah" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.231, + 0.73, + 0.243 + ], + "angle": 0, + "content": "Haja Maideen" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.245, + 0.759, + 0.257 + ], + "angle": 0, + "content": "Hajime Hosokawa" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.258, + 0.693, + 0.269 + ], + "angle": 0, + "content": "Han Liu" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.272, + 0.725, + 0.284 + ], + "angle": 0, + "content": "Han-Kai Hsu" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.286, + 0.715, + 0.299 + ], + "angle": 0, + "content": "Hann Wang" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.3, + 0.684, + 0.311 + ], + "angle": 0, + "content": "Hao Li" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.314, + 0.703, + 0.326 + ], + "angle": 0, + "content": "Hao Yang" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.328, + 0.724, + 0.34 + ], + "angle": 0, + "content": "Haofeng Zhu" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.341, + 0.732, + 0.354 + ], + "angle": 0, + "content": "Haozheng Fan" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.355, + 0.733, + 0.368 + ], + "angle": 0, + "content": "Harman Singh" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.369, + 0.799, + 0.381 + ], + "angle": 0, + "content": "Harshavardhan Kaluvala" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.383, + 0.732, + 0.394 + ], + "angle": 0, + "content": "Hashim Saeed" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.396, + 0.685, + 0.407 + ], + "angle": 0, + "content": "He Xie" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.41, + 0.718, + 0.422 + ], + "angle": 0, + "content": "Helian Feng" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.424, + 0.696, + 0.436 + ], + "angle": 0, + "content": "Hendrix" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.438, + 0.718, + 0.45 + ], + "angle": 0, + "content": "Hengzhi Pei" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.452, + 0.737, + 0.463 + ], + "angle": 0, + "content": "Henrik Nielsen" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.465, + 0.714, + 0.477 + ], + "angle": 0, + "content": "Hesam Ilati" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.479, + 0.742, + 0.49 + ], + "angle": 0, + "content": "Himanshu Patel" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.492, + 0.723, + 0.505 + ], + "angle": 0, + "content": "Hongshan Li" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.507, + 0.733, + 0.519 + ], + "angle": 0, + "content": "Hongzhou Lin" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.52, + 0.727, + 0.532 + ], + "angle": 0, + "content": "Hussain Raza" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.534, + 0.719, + 0.545 + ], + "angle": 0, + "content": "Ian Cullinan" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.548, + 0.702, + 0.559 + ], + "angle": 0, + "content": "Imre Kiss" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.562, + 0.787, + 0.574 + ], + "angle": 0, + "content": "Inbarasan Thangamani" + }, + { + "type": "text", + "bbox": [ + 0.637, + 
0.576, + 0.763, + 0.587 + ], + "angle": 0, + "content": "Indrayani Fadnavis" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.589, + 0.781, + 0.601 + ], + "angle": 0, + "content": "Ionut Teodor Sorodoc" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.603, + 0.722, + 0.614 + ], + "angle": 0, + "content": "Irem Ertuerk" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.617, + 0.769, + 0.63 + ], + "angle": 0, + "content": "Iryna Yemialyanava" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.631, + 0.707, + 0.642 + ], + "angle": 0, + "content": "Ishan Soni" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.644, + 0.712, + 0.655 + ], + "angle": 0, + "content": "Ismail Jelal" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.658, + 0.695, + 0.669 + ], + "angle": 0, + "content": "Ivan Tse" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.672, + 0.741, + 0.683 + ], + "angle": 0, + "content": "Jack FitzGerald" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.686, + 0.705, + 0.697 + ], + "angle": 0, + "content": "Jack Zhao" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.7, + 0.749, + 0.712 + ], + "angle": 0, + "content": "Jackson Rothgeb" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.713, + 0.703, + 0.725 + ], + "angle": 0, + "content": "Jacky Lee" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.727, + 0.701, + 0.74 + ], + "angle": 0, + "content": "Jake Jung" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.741, + 0.726, + 0.752 + ], + "angle": 0, + "content": "Jakub Debski" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.755, + 0.739, + 0.766 + ], + "angle": 0, + "content": "Jakub Tomczak" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.768, + 0.711, + 0.779 + ], + "angle": 0, + "content": "James Jeun" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.782, + 0.733, + 0.793 + ], + "angle": 0, + "content": "James Sanders" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.796, + 0.733, + 0.809 + ], + "angle": 0, + "content": "Jason Crowley" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.81, + 0.688, + 0.822 + ], + "angle": 0, + "content": "Jay Lee" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.823, + 0.808, + 0.836 + ], + "angle": 0, + "content": "Jayakrishna Anvesh Paidy" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.837, + 0.726, + 0.85 + ], + "angle": 0, + "content": "Jayant Tiwari" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.851, + 0.718, + 0.862 + ], + "angle": 0, + "content": "Jean Farmer" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.865, + 0.723, + 0.877 + ], + "angle": 0, + "content": "Jeff Solinsky" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.879, + 0.704, + 0.889 + ], + "angle": 0, + "content": "Jenna Lau" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.892, + 0.754, + 0.905 + ], + "angle": 0, + "content": "Jeremy Savareese" + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.936, + 0.505, + 0.947 + ], + "angle": 0, + "content": "44" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.372, + 0.043, + 0.621, + 0.058 + ], + "angle": 0, + "content": "The Amazon Nova Family of Models" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.092, + 0.214, + 0.106 + ], + "angle": 0, + "content": "Jerzy Zagorski" + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.107, + 0.156, + 0.118 + ], + "angle": 0, + "content": "Ji Dai" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.12, + 0.231, + 0.134 + ], + "angle": 0, + "content": "Jiacheng (JC) Gu" + }, + { + "type": "text", + "bbox": [ + 0.116, + 
0.135, + 0.175, + 0.146 + ], + "angle": 0, + "content": "Jiahui Li" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.148, + 0.247, + 0.162 + ], + "angle": 0, + "content": "Jian (Skyler) Zheng" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.163, + 0.19, + 0.173 + ], + "angle": 0, + "content": "Jianhua Lu" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.176, + 0.209, + 0.189 + ], + "angle": 0, + "content": "Jianhua Wang" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.19, + 0.186, + 0.201 + ], + "angle": 0, + "content": "Jiawei Dai" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.203, + 0.186, + 0.214 + ], + "angle": 0, + "content": "Jiawei Mo" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.216, + 0.172, + 0.228 + ], + "angle": 0, + "content": "Jiaxi Xu" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.231, + 0.178, + 0.244 + ], + "angle": 0, + "content": "Jie Liang" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.245, + 0.174, + 0.258 + ], + "angle": 0, + "content": "Jie Yang" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.259, + 0.186, + 0.271 + ], + "angle": 0, + "content": "Jim Logan" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.272, + 0.222, + 0.285 + ], + "angle": 0, + "content": "Jimit Majmudar" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.286, + 0.171, + 0.299 + ], + "angle": 0, + "content": "Jing Liu" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.3, + 0.215, + 0.313 + ], + "angle": 0, + "content": "Jinghong Miao" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.314, + 0.177, + 0.327 + ], + "angle": 0, + "content": "Jingru Yi" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.328, + 0.199, + 0.34 + ], + "angle": 0, + "content": "Jingyang Jin" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.342, + 0.199, + 0.353 + ], + "angle": 0, + "content": "Jiun-Yu Kao" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.355, + 0.202, + 0.368 + ], + "angle": 0, + "content": "Jixuan Wang" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.37, + 0.201, + 0.382 + ], + "angle": 0, + "content": "Jiyang Wang" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.383, + 0.214, + 0.395 + ], + "angle": 0, + "content": "Joe Pemberton" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.397, + 0.197, + 0.408 + ], + "angle": 0, + "content": "Joel Carlson" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.41, + 0.207, + 0.422 + ], + "angle": 0, + "content": "Joey Blundell" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.424, + 0.214, + 0.436 + ], + "angle": 0, + "content": "John Chin-Jew" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.438, + 0.171, + 0.449 + ], + "angle": 0, + "content": "John He" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.452, + 0.199, + 0.464 + ], + "angle": 0, + "content": "Jonathan Ho" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.466, + 0.226, + 0.477 + ], + "angle": 0, + "content": "Jonathan Hueser" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.479, + 0.21, + 0.49 + ], + "angle": 0, + "content": "Jonathan Lunt" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.493, + 0.209, + 0.505 + ], + "angle": 0, + "content": "Jooyoung Lee" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.507, + 0.19, + 0.519 + ], + "angle": 0, + "content": "Joshua Tan" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.521, + 0.225, + 0.534 + ], + "angle": 0, + "content": "Joyjit Chatterjee" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.535, + 0.214, + 0.547 + ], + "angle": 0, + "content": "Judith Gaspers" + 
}, + { + "type": "text", + "bbox": [ + 0.116, + 0.548, + 0.181, + 0.561 + ], + "angle": 0, + "content": "Jue Wang" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.563, + 0.176, + 0.575 + ], + "angle": 0, + "content": "Jun Fang" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.576, + 0.176, + 0.588 + ], + "angle": 0, + "content": "Jun Tang" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.59, + 0.174, + 0.601 + ], + "angle": 0, + "content": "Jun Wan" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.603, + 0.167, + 0.614 + ], + "angle": 0, + "content": "Jun Wu" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.617, + 0.198, + 0.629 + ], + "angle": 0, + "content": "Junlei Wang" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.631, + 0.179, + 0.643 + ], + "angle": 0, + "content": "Junyi Shi" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.645, + 0.191, + 0.656 + ], + "angle": 0, + "content": "Justin Chiu" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.658, + 0.214, + 0.67 + ], + "angle": 0, + "content": "Justin Satriano" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.672, + 0.184, + 0.683 + ], + "angle": 0, + "content": "Justin Yee" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.686, + 0.217, + 0.698 + ], + "angle": 0, + "content": "Jwala Dhamala" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.7, + 0.197, + 0.712 + ], + "angle": 0, + "content": "Jyoti Bansal" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.713, + 0.178, + 0.725 + ], + "angle": 0, + "content": "Kai Zhen" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.727, + 0.218, + 0.74 + ], + "angle": 0, + "content": "Kai-Wei Chang" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.741, + 0.204, + 0.754 + ], + "angle": 0, + "content": "Kaixiang Lin" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.755, + 0.214, + 0.767 + ], + "angle": 0, + "content": "Kalyan Raman" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.769, + 0.33, + 0.781 + ], + "angle": 0, + "content": "Kanthashree Mysore Sathyendra" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.782, + 0.213, + 0.793 + ], + "angle": 0, + "content": "Karabo Moroe" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.796, + 0.237, + 0.807 + ], + "angle": 0, + "content": "Karan Bhandarkar" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.81, + 0.21, + 0.821 + ], + "angle": 0, + "content": "Karan Kothari" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.824, + 0.252, + 0.835 + ], + "angle": 0, + "content": "Karolina Owczarzak" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.837, + 0.264, + 0.85 + ], + "angle": 0, + "content": "Karthick Gopalswamy" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.851, + 0.209, + 0.862 + ], + "angle": 0, + "content": "Karthick Ravi" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.865, + 0.265, + 0.876 + ], + "angle": 0, + "content": "Karthik Ramakrishnan" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.879, + 0.253, + 0.891 + ], + "angle": 0, + "content": "Karthika Arumugam" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.892, + 0.205, + 0.904 + ], + "angle": 0, + "content": "Kartik Mehta" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.093, + 0.525, + 0.106 + ], + "angle": 0, + "content": "Katarzyna Konczalska" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.107, + 0.496, + 0.119 + ], + "angle": 0, + "content": "Kavya Ravikumar" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.12, + 0.431, + 0.132 + ], + "angle": 0, + "content": "Ke Tran" + }, + { + "type": "text", + 
"bbox": [ + 0.377, + 0.135, + 0.455, + 0.147 + ], + "angle": 0, + "content": "Kochen Qin" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.149, + 0.432, + 0.159 + ], + "angle": 0, + "content": "Kelin Li" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.162, + 0.44, + 0.173 + ], + "angle": 0, + "content": "Kelvin Li" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.176, + 0.478, + 0.187 + ], + "angle": 0, + "content": "Ketan Kulkarni" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.19, + 0.54, + 0.203 + ], + "angle": 0, + "content": "Kevin Angelo Rodrigues" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.204, + 0.454, + 0.216 + ], + "angle": 0, + "content": "Keyur Patel" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.217, + 0.49, + 0.229 + ], + "angle": 0, + "content": "Khadige Abboud" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.231, + 0.464, + 0.244 + ], + "angle": 0, + "content": "Kiana Hajebi" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.245, + 0.46, + 0.256 + ], + "angle": 0, + "content": "Klaus Reiter" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.258, + 0.459, + 0.27 + ], + "angle": 0, + "content": "Kris Schultz" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.272, + 0.489, + 0.285 + ], + "angle": 0, + "content": "Krishna Anisetty" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.286, + 0.488, + 0.298 + ], + "angle": 0, + "content": "Krishna Kotnana" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.3, + 0.445, + 0.311 + ], + "angle": 0, + "content": "Kristen Li" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.314, + 0.557, + 0.327 + ], + "angle": 0, + "content": "Kruthi Channamallikarjuna" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.328, + 0.515, + 0.34 + ], + "angle": 0, + "content": "Krzysztof Jakubczyk" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.342, + 0.474, + 0.354 + ], + "angle": 0, + "content": "Kuba Pierewoj" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.355, + 0.442, + 0.366 + ], + "angle": 0, + "content": "Kunal Pal" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.369, + 0.494, + 0.38 + ], + "angle": 0, + "content": "Kunwar Srivastav" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.383, + 0.488, + 0.394 + ], + "angle": 0, + "content": "Kyle Bannerman" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.396, + 0.471, + 0.408 + ], + "angle": 0, + "content": "Lahari Poddar" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.41, + 0.482, + 0.422 + ], + "angle": 0, + "content": "Lakshmi Prasad" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.424, + 0.458, + 0.436 + ], + "angle": 0, + "content": "Larry Tseng" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.438, + 0.484, + 0.45 + ], + "angle": 0, + "content": "Laxmikant Naik" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.452, + 0.559, + 0.463 + ], + "angle": 0, + "content": "Leena Chennuru Vankadara" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.466, + 0.482, + 0.477 + ], + "angle": 0, + "content": "Lenon Minorics" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.479, + 0.43, + 0.49 + ], + "angle": 0, + "content": "Leo Liu" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.493, + 0.483, + 0.505 + ], + "angle": 0, + "content": "Leonard Lausen" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.507, + 0.528, + 0.518 + ], + "angle": 0, + "content": "Leonardo F. R. 
Ribeiro" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.521, + 0.438, + 0.532 + ], + "angle": 0, + "content": "Li Zhang" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.535, + 0.473, + 0.545 + ], + "angle": 0, + "content": "Lili Gehorsam" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.548, + 0.429, + 0.56 + ], + "angle": 0, + "content": "Ling Qi" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.563, + 0.449, + 0.573 + ], + "angle": 0, + "content": "Lisa Bauer" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.576, + 0.453, + 0.588 + ], + "angle": 0, + "content": "Lori Knapp" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.591, + 0.433, + 0.602 + ], + "angle": 0, + "content": "Lu Zeng" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.604, + 0.453, + 0.616 + ], + "angle": 0, + "content": "Lucas Tong" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.618, + 0.452, + 0.63 + ], + "angle": 0, + "content": "Lulu Wong" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.632, + 0.464, + 0.642 + ], + "angle": 0, + "content": "Luoxin Chen" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.645, + 0.486, + 0.657 + ], + "angle": 0, + "content": "Maciej Rudnicki" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.659, + 0.493, + 0.67 + ], + "angle": 0, + "content": "Mahdi Namazifar" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.672, + 0.504, + 0.683 + ], + "angle": 0, + "content": "Mahesh Jaliminche" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.686, + 0.514, + 0.697 + ], + "angle": 0, + "content": "Maira Ladeira Tanke" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.7, + 0.469, + 0.712 + ], + "angle": 0, + "content": "Manasi Gupta" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.713, + 0.498, + 0.726 + ], + "angle": 0, + "content": "Mandeep Ahlawat" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.727, + 0.472, + 0.739 + ], + "angle": 0, + "content": "Mani Khanuja" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.741, + 0.482, + 0.753 + ], + "angle": 0, + "content": "Mani Sundaram" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.755, + 0.462, + 0.767 + ], + "angle": 0, + "content": "Marcin Leyk" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.769, + 0.5, + 0.78 + ], + "angle": 0, + "content": "Mariusz Momotko" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.782, + 0.472, + 0.793 + ], + "angle": 0, + "content": "Markus Boese" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.796, + 0.477, + 0.808 + ], + "angle": 0, + "content": "Markus Dreyer" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.81, + 0.484, + 0.821 + ], + "angle": 0, + "content": "Markus Mueller" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.824, + 0.443, + 0.835 + ], + "angle": 0, + "content": "Mason Fu" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.837, + 0.481, + 0.849 + ], + "angle": 0, + "content": "Mateusz Górski" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.851, + 0.525, + 0.863 + ], + "angle": 0, + "content": "Mateusz Mastalerczyk" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.865, + 0.462, + 0.876 + ], + "angle": 0, + "content": "Matias Mora" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.879, + 0.466, + 0.89 + ], + "angle": 0, + "content": "Matt Johnson" + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.892, + 0.446, + 0.904 + ], + "angle": 0, + "content": "Matt Scott" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.093, + 0.73, + 0.105 + ], + "angle": 0, + "content": "Matthew Wen" + }, + { + "type": "text", + "bbox": [ + 
0.637, + 0.107, + 0.725, + 0.12 + ], + "angle": 0, + "content": "Max Barysau" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.121, + 0.764, + 0.133 + ], + "angle": 0, + "content": "Maya Bouerdassi" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.135, + 0.738, + 0.147 + ], + "angle": 0, + "content": "Maya Krishnan" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.149, + 0.735, + 0.161 + ], + "angle": 0, + "content": "Mayank Gupta" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.163, + 0.736, + 0.175 + ], + "angle": 0, + "content": "Mayank Hirani" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.176, + 0.753, + 0.188 + ], + "angle": 0, + "content": "Mayank Kulkarni" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.19, + 0.819, + 0.203 + ], + "angle": 0, + "content": "Meganathan Narayanasamy" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.204, + 0.755, + 0.215 + ], + "angle": 0, + "content": "Melanie Bradford" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.217, + 0.729, + 0.228 + ], + "angle": 0, + "content": "Melanie Gens" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.231, + 0.732, + 0.242 + ], + "angle": 0, + "content": "Melissa Burke" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.245, + 0.699, + 0.257 + ], + "angle": 0, + "content": "Meng Jin" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.259, + 0.71, + 0.27 + ], + "angle": 0, + "content": "Miao Chen" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.272, + 0.769, + 0.284 + ], + "angle": 0, + "content": "Michael Denkowski" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.286, + 0.747, + 0.299 + ], + "angle": 0, + "content": "Michael Heymel" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.3, + 0.783, + 0.313 + ], + "angle": 0, + "content": "Michael Krestyaninov" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.314, + 0.734, + 0.325 + ], + "angle": 0, + "content": "Michal Obirek" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.328, + 0.798, + 0.339 + ], + "angle": 0, + "content": "Michalina Wichorowska" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.341, + 0.728, + 0.352 + ], + "angle": 0, + "content": "Michal Miotk" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.355, + 0.743, + 0.366 + ], + "angle": 0, + "content": "Milosz Watroba" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.369, + 0.726, + 0.382 + ], + "angle": 0, + "content": "Mingyi Hong" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.383, + 0.716, + 0.395 + ], + "angle": 0, + "content": "Mingzhi Yu" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.397, + 0.72, + 0.408 + ], + "angle": 0, + "content": "Miranda Liu" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.41, + 0.752, + 0.422 + ], + "angle": 0, + "content": "Mohamed Gouda" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.424, + 0.793, + 0.436 + ], + "angle": 0, + "content": "Mohammad El-Shabani" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.438, + 0.813, + 0.449 + ], + "angle": 0, + "content": "Mohammad Ghavamzadeh" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.452, + 0.726, + 0.463 + ], + "angle": 0, + "content": "Mohit Bansal" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.466, + 0.739, + 0.477 + ], + "angle": 0, + "content": "Morteza Ziyadi" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.48, + 0.693, + 0.49 + ], + "angle": 0, + "content": "Nan Xia" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.493, + 0.733, + 0.505 + ], + "angle": 0, + "content": "Nathan Susanj" + }, + { + "type": "text", + "bbox": [ + 
0.637, + 0.507, + 0.714, + 0.518 + ], + "angle": 0, + "content": "Nav Bhasin" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.521, + 0.739, + 0.532 + ], + "angle": 0, + "content": "Neha Goswami" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.534, + 0.756, + 0.546 + ], + "angle": 0, + "content": "Nehal Belgamwar" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.549, + 0.779, + 0.56 + ], + "angle": 0, + "content": "Nicolas Anastassacos" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.563, + 0.751, + 0.574 + ], + "angle": 0, + "content": "Nicolas Bergeron" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.576, + 0.705, + 0.587 + ], + "angle": 0, + "content": "Nidhi Jain" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.59, + 0.705, + 0.601 + ], + "angle": 0, + "content": "Nihal Jain" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.604, + 0.779, + 0.616 + ], + "angle": 0, + "content": "Niharika Chopparapu" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.618, + 0.686, + 0.628 + ], + "angle": 0, + "content": "Nik Xu" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.631, + 0.723, + 0.642 + ], + "angle": 0, + "content": "Nikko Strom" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.645, + 0.782, + 0.656 + ], + "angle": 0, + "content": "Nikolaos Malandrakis" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.659, + 0.744, + 0.669 + ], + "angle": 0, + "content": "Nimisha Mishra" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.672, + 0.724, + 0.683 + ], + "angle": 0, + "content": "Ninad Parkhi" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.686, + 0.75, + 0.697 + ], + "angle": 0, + "content": "Ninareh Mehrabi" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.7, + 0.719, + 0.71 + ], + "angle": 0, + "content": "Nishita Sant" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.713, + 0.733, + 0.726 + ], + "angle": 0, + "content": "Nishtha Gupta" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.727, + 0.73, + 0.739 + ], + "angle": 0, + "content": "Nitesh Sekhar" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.741, + 0.729, + 0.754 + ], + "angle": 0, + "content": "Nithin Rajeev" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.755, + 0.816, + 0.767 + ], + "angle": 0, + "content": "Nithish Raja Chidambaram" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.769, + 0.715, + 0.779 + ], + "angle": 0, + "content": "Nitish Dhar" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.782, + 0.75, + 0.795 + ], + "angle": 0, + "content": "Noor Bhagwagar" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.796, + 0.729, + 0.809 + ], + "angle": 0, + "content": "Noy Konforty" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.81, + 0.713, + 0.82 + ], + "angle": 0, + "content": "Omar Babu" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.824, + 0.724, + 0.835 + ], + "angle": 0, + "content": "Omid Razavi" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.837, + 0.756, + 0.85 + ], + "angle": 0, + "content": "Orchid Majumder" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.851, + 0.712, + 0.862 + ], + "angle": 0, + "content": "Osama Dar" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.865, + 0.706, + 0.876 + ], + "angle": 0, + "content": "Oscar Hsu" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.879, + 0.723, + 0.889 + ], + "angle": 0, + "content": "Pablo Kvitca" + }, + { + "type": "text", + "bbox": [ + 0.637, + 0.892, + 0.734, + 0.905 + ], + "angle": 0, + "content": "Pallavi Pandey" + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.936, 
+ 0.505, + 0.947 + ], + "angle": 0, + "content": "45" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.372, + 0.043, + 0.621, + 0.058 + ], + "angle": 0, + "content": "The Amazon Nova Family of Models" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.091, + 0.236, + 0.106 + ], + "angle": 0, + "content": "Parker Seegmiller" + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.107, + 0.209, + 0.12 + ], + "angle": 0, + "content": "Patrick Lange" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.121, + 0.2, + 0.133 + ], + "angle": 0, + "content": "Paul Ferraro" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.135, + 0.217, + 0.147 + ], + "angle": 0, + "content": "Payal Motwani" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.148, + 0.226, + 0.161 + ], + "angle": 0, + "content": "Pegah Kharazmi" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.162, + 0.181, + 0.175 + ], + "angle": 0, + "content": "Pei Wang" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.176, + 0.195, + 0.188 + ], + "angle": 0, + "content": "Pengfei Liu" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.19, + 0.207, + 0.201 + ], + "angle": 0, + "content": "Peter Bradtke" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.203, + 0.187, + 0.215 + ], + "angle": 0, + "content": "Peter Gotoz" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.217, + 0.19, + 0.228 + ], + "angle": 0, + "content": "Peter Zhou" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.231, + 0.204, + 0.244 + ], + "angle": 0, + "content": "Pichao Wang" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.245, + 0.203, + 0.257 + ], + "angle": 0, + "content": "Piotr Poskart" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.258, + 0.226, + 0.271 + ], + "angle": 0, + "content": "Pooja Sonawane" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.272, + 0.237, + 0.285 + ], + "angle": 0, + "content": "Pradeep Natarajan" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.286, + 0.248, + 0.299 + ], + "angle": 0, + "content": "Pradyun Ramadorai" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.3, + 0.201, + 0.312 + ], + "angle": 0, + "content": "Pralam Shah" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.314, + 0.22, + 0.325 + ], + "angle": 0, + "content": "Prasad Nirantar" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.327, + 0.233, + 0.339 + ], + "angle": 0, + "content": "Prasanthi Chavali" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.341, + 0.265, + 0.354 + ], + "angle": 0, + "content": "Prashan Wanigasekara" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.355, + 0.213, + 0.366 + ], + "angle": 0, + "content": "Prashant Saraf" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.368, + 0.201, + 0.382 + ], + "angle": 0, + "content": "Prashun Dey" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.383, + 0.208, + 0.395 + ], + "angle": 0, + "content": "Pratyush Pant" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.396, + 0.217, + 0.408 + ], + "angle": 0, + "content": "Prerak Pradhan" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.41, + 0.198, + 0.423 + ], + "angle": 0, + "content": "Preyaa Patel" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.424, + 0.231, + 0.436 + ], + "angle": 0, + "content": "Priyanka Dadlani" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.438, + 0.298, + 0.449 + ], + "angle": 0, + "content": "Prudhvee Narasimha Sadha" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.452, + 0.174, + 0.464 + ], + "angle": 0, + "content": "Qi Dong" + }, + { + "type": "text", + "bbox": [ + 0.116, 
+ 0.466, + 0.174, + 0.477 + ], + "angle": 0, + "content": "Qian Hu" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.479, + 0.229, + 0.491 + ], + "angle": 0, + "content": "Qiaozi (QZ) Gao" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.493, + 0.178, + 0.506 + ], + "angle": 0, + "content": "Qing Liu" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.507, + 0.192, + 0.519 + ], + "angle": 0, + "content": "Quinn Lam" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.521, + 0.186, + 0.532 + ], + "angle": 0, + "content": "Quynh Do" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.534, + 0.206, + 0.545 + ], + "angle": 0, + "content": "R. Manmatha" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.547, + 0.206, + 0.559 + ], + "angle": 0, + "content": "Rachel Willis" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.562, + 0.187, + 0.573 + ], + "angle": 0, + "content": "Rafael Liu" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.575, + 0.194, + 0.587 + ], + "angle": 0, + "content": "Rafal Ellert" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.589, + 0.212, + 0.601 + ], + "angle": 0, + "content": "Rafal Kalinski" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.603, + 0.218, + 0.615 + ], + "angle": 0, + "content": "Rafi Al Attrach" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.617, + 0.207, + 0.629 + ], + "angle": 0, + "content": "Ragha Prasad" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.63, + 0.209, + 0.643 + ], + "angle": 0, + "content": "Ragini Prasad" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.644, + 0.221, + 0.657 + ], + "angle": 0, + "content": "Raguvir Kunani" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.658, + 0.201, + 0.671 + ], + "angle": 0, + "content": "Rahul Gupta" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.673, + 0.21, + 0.683 + ], + "angle": 0, + "content": "Rahul Sharma" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.685, + 0.205, + 0.697 + ], + "angle": 0, + "content": "Rahul Tewari" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.699, + 0.257, + 0.712 + ], + "angle": 0, + "content": "Rajaganesh Baskaran" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.713, + 0.198, + 0.726 + ], + "angle": 0, + "content": "Rajan Singh" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.727, + 0.197, + 0.74 + ], + "angle": 0, + "content": "Rajiv Gupta" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.741, + 0.199, + 0.754 + ], + "angle": 0, + "content": "Rajiv Reddy" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.755, + 0.22, + 0.767 + ], + "angle": 0, + "content": "Rajshekhar Das" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.768, + 0.211, + 0.78 + ], + "angle": 0, + "content": "Rakesh Chada" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.782, + 0.315, + 0.794 + ], + "angle": 0, + "content": "Rakesh Vaideeswaran Mahesh" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.795, + 0.257, + 0.808 + ], + "angle": 0, + "content": "Ram Chandrasekaran" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.81, + 0.234, + 0.822 + ], + "angle": 0, + "content": "Ramesh Nallapati" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.824, + 0.175, + 0.835 + ], + "angle": 0, + "content": "Ran Xue" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.837, + 0.27, + 0.85 + ], + "angle": 0, + "content": "Rashmi Gangadharaiah" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.851, + 0.233, + 0.863 + ], + "angle": 0, + "content": "Ravi Rachakonda" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.865, + 0.218, + 0.878 
+ ], + "angle": 0, + "content": "Renxian Zhang" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.879, + 0.235, + 0.89 + ], + "angle": 0, + "content": "Rexhina Blloshmi" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.892, + 0.231, + 0.905 + ], + "angle": 0, + "content": "Rishabh Agrawal" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.092, + 0.473, + 0.106 + ], + "angle": 0, + "content": "Robert Enyedi" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.107, + 0.464, + 0.119 + ], + "angle": 0, + "content": "Robert Lowe" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.12, + 0.478, + 0.132 + ], + "angle": 0, + "content": "Robik Shrestha" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.134, + 0.513, + 0.146 + ], + "angle": 0, + "content": "Robinson Piramuthu" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.148, + 0.459, + 0.16 + ], + "angle": 0, + "content": "Rohail Asad" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.162, + 0.476, + 0.174 + ], + "angle": 0, + "content": "Rohan Khanna" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.176, + 0.495, + 0.189 + ], + "angle": 0, + "content": "Rohan Mukherjee" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.19, + 0.458, + 0.201 + ], + "angle": 0, + "content": "Rohit Mittal" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.203, + 0.461, + 0.215 + ], + "angle": 0, + "content": "Rohit Prasad" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.217, + 0.572, + 0.23 + ], + "angle": 0, + "content": "Rohith Mysore Vijaya Kumar" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.231, + 0.466, + 0.243 + ], + "angle": 0, + "content": "Ron Diamant" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.245, + 0.473, + 0.257 + ], + "angle": 0, + "content": "Ruchita Gupta" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.258, + 0.447, + 0.27 + ], + "angle": 0, + "content": "Ruiwen Li" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.272, + 0.452, + 0.285 + ], + "angle": 0, + "content": "Ruoying Li" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.286, + 0.486, + 0.299 + ], + "angle": 0, + "content": "RushabhFegade" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.3, + 0.459, + 0.312 + ], + "angle": 0, + "content": "Ruxu Zhang" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.314, + 0.461, + 0.326 + ], + "angle": 0, + "content": "Ryan Arbow" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.328, + 0.451, + 0.339 + ], + "angle": 0, + "content": "Ryan Chen" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.341, + 0.473, + 0.353 + ], + "angle": 0, + "content": "Ryan Gabbard" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.355, + 0.462, + 0.366 + ], + "angle": 0, + "content": "Ryan Hoium" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.369, + 0.449, + 0.381 + ], + "angle": 0, + "content": "Ryan King" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.383, + 0.506, + 0.395 + ], + "angle": 0, + "content": "Sabarishkumar Iyer" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.396, + 0.472, + 0.408 + ], + "angle": 0, + "content": "Sachal Malick" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.41, + 0.489, + 0.422 + ], + "angle": 0, + "content": "Sahar Movaghati" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.424, + 0.461, + 0.435 + ], + "angle": 0, + "content": "Sai Balakavi" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.437, + 0.44, + 0.449 + ], + "angle": 0, + "content": "Sai Jakka" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.451, + 0.522, + 0.464 + ], + "angle": 0, + "content": 
"Sai Kashyap Paruvelli" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.465, + 0.534, + 0.478 + ], + "angle": 0, + "content": "Sai Muralidhar Jayanthi" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.479, + 0.572, + 0.491 + ], + "angle": 0, + "content": "Saicharan Shriram Mujumdar" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.493, + 0.488, + 0.505 + ], + "angle": 0, + "content": "Sainyam Kapoor" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.507, + 0.461, + 0.519 + ], + "angle": 0, + "content": "Sajjad Beygi" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.521, + 0.483, + 0.533 + ], + "angle": 0, + "content": "Saket Dingliwal" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.535, + 0.46, + 0.545 + ], + "angle": 0, + "content": "Saleh Soltan" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.548, + 0.459, + 0.559 + ], + "angle": 0, + "content": "Sam Ricklin" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.562, + 0.456, + 0.573 + ], + "angle": 0, + "content": "Sam Tucker" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.575, + 0.468, + 0.587 + ], + "angle": 0, + "content": "Sameer Sinha" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.589, + 0.514, + 0.602 + ], + "angle": 0, + "content": "Samridhi Choudhary" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.604, + 0.458, + 0.614 + ], + "angle": 0, + "content": "Samson Tan" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.617, + 0.494, + 0.628 + ], + "angle": 0, + "content": "Samuel Broscheit" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.63, + 0.486, + 0.642 + ], + "angle": 0, + "content": "Samuel Schulter" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.644, + 0.487, + 0.657 + ], + "angle": 0, + "content": "Sanchit Agarwal" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.659, + 0.475, + 0.671 + ], + "angle": 0, + "content": "Sandeep Atluri" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.673, + 0.473, + 0.683 + ], + "angle": 0, + "content": "Sander Valstar" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.686, + 0.488, + 0.698 + ], + "angle": 0, + "content": "Sanjana Shankar" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.7, + 0.503, + 0.712 + ], + "angle": 0, + "content": "Sanyukta Sanyukta" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.713, + 0.483, + 0.725 + ], + "angle": 0, + "content": "Sarthak Khanna" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.727, + 0.509, + 0.74 + ], + "angle": 0, + "content": "Sarvpriye Khetrapal" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.741, + 0.504, + 0.753 + ], + "angle": 0, + "content": "Satish Janakiraman" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.755, + 0.461, + 0.766 + ], + "angle": 0, + "content": "Saumil Shah" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.768, + 0.489, + 0.78 + ], + "angle": 0, + "content": "Saurabh Akolkar" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.782, + 0.461, + 0.793 + ], + "angle": 0, + "content": "Saurabh Giri" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.795, + 0.514, + 0.807 + ], + "angle": 0, + "content": "Saurabh Khandelwal" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.81, + 0.476, + 0.821 + ], + "angle": 0, + "content": "Saurabh Pawar" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.824, + 0.468, + 0.835 + ], + "angle": 0, + "content": "Saurabh Sahu" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.837, + 0.457, + 0.85 + ], + "angle": 0, + "content": "Sean Huang" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.851, + 0.437, + 0.863 + 
], + "angle": 0, + "content": "Sejun Ra" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.865, + 0.509, + 0.878 + ], + "angle": 0, + "content": "Senthilkumar Gopal" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.879, + 0.514, + 0.891 + ], + "angle": 0, + "content": "Sergei Dobroshinsky" + }, + { + "type": "text", + "bbox": [ + 0.376, + 0.892, + 0.451, + 0.904 + ], + "angle": 0, + "content": "Shadi Saba" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.092, + 0.72, + 0.106 + ], + "angle": 0, + "content": "Shamik Roy" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.107, + 0.71, + 0.118 + ], + "angle": 0, + "content": "Shamit Lal" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.12, + 0.807, + 0.132 + ], + "angle": 0, + "content": "Shankar Ananthakrishnan" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.135, + 0.704, + 0.146 + ], + "angle": 0, + "content": "Sharon Li" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.148, + 0.743, + 0.161 + ], + "angle": 0, + "content": "Shashwat Srijan" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.162, + 0.734, + 0.174 + ], + "angle": 0, + "content": "Shekhar Bhide" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.176, + 0.754, + 0.189 + ], + "angle": 0, + "content": "Sheng Long Tang" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.19, + 0.708, + 0.202 + ], + "angle": 0, + "content": "Sheng Zha" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.204, + 0.735, + 0.216 + ], + "angle": 0, + "content": "Sheree Oraby" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.217, + 0.735, + 0.229 + ], + "angle": 0, + "content": "Sherif Mostafa" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.231, + 0.692, + 0.243 + ], + "angle": 0, + "content": "Shiqi Li" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.245, + 0.744, + 0.256 + ], + "angle": 0, + "content": "Shishir Bharathi" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.258, + 0.743, + 0.27 + ], + "angle": 0, + "content": "ShivamPrakash" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.272, + 0.739, + 0.285 + ], + "angle": 0, + "content": "Shiyuan Huang" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.286, + 0.763, + 0.299 + ], + "angle": 0, + "content": "Shreya Yembarwar" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.3, + 0.745, + 0.312 + ], + "angle": 0, + "content": "Shreyas Pansare" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.314, + 0.779, + 0.326 + ], + "angle": 0, + "content": "Shreyas Subramanian" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.328, + 0.726, + 0.34 + ], + "angle": 0, + "content": "Shrijeet Joshi" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.341, + 0.702, + 0.353 + ], + "angle": 0, + "content": "Shuai Liu" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.355, + 0.712, + 0.367 + ], + "angle": 0, + "content": "Shuai Tang" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.369, + 0.762, + 0.381 + ], + "angle": 0, + "content": "Shubham Chandak" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.383, + 0.736, + 0.395 + ], + "angle": 0, + "content": "Shubham Garg" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.396, + 0.753, + 0.408 + ], + "angle": 0, + "content": "Shubham Katiyar" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.41, + 0.746, + 0.422 + ], + "angle": 0, + "content": "Shubham Mehta" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.424, + 0.763, + 0.436 + ], + "angle": 0, + "content": "Shubham Srivastav" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.438, + 0.71, + 0.45 + ], + "angle": 
0, + "content": "Shuo Yang" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.452, + 0.759, + 0.464 + ], + "angle": 0, + "content": "Siddalingesha D S" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.466, + 0.778, + 0.478 + ], + "angle": 0, + "content": "Siddharth Choudhary" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.479, + 0.793, + 0.491 + ], + "angle": 0, + "content": "Siddharth Singh Senger" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.493, + 0.719, + 0.504 + ], + "angle": 0, + "content": "Simon Babb" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.507, + 0.718, + 0.519 + ], + "angle": 0, + "content": "Sina Moeini" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.521, + 0.704, + 0.533 + ], + "angle": 0, + "content": "Siqi Deng" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.535, + 0.747, + 0.546 + ], + "angle": 0, + "content": "Siva Loganathan" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.548, + 0.772, + 0.561 + ], + "angle": 0, + "content": "Slawomir Domagala" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.562, + 0.727, + 0.573 + ], + "angle": 0, + "content": "Sneha Narkar" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.575, + 0.739, + 0.587 + ], + "angle": 0, + "content": "Sneha Wadhwa" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.59, + 0.749, + 0.603 + ], + "angle": 0, + "content": "Songyang Zhang" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.604, + 0.734, + 0.616 + ], + "angle": 0, + "content": "Songyao Jiang" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.617, + 0.729, + 0.629 + ], + "angle": 0, + "content": "Sony Trenous" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.631, + 0.76, + 0.643 + ], + "angle": 0, + "content": "Soumajyoti Sarkar" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.644, + 0.727, + 0.657 + ], + "angle": 0, + "content": "Soumya Saha" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.659, + 0.739, + 0.671 + ], + "angle": 0, + "content": "Sourabh Reddy" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.672, + 0.744, + 0.683 + ], + "angle": 0, + "content": "Sourav Dokania" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.686, + 0.788, + 0.699 + ], + "angle": 0, + "content": "Spurthideepika Sandiri" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.7, + 0.758, + 0.712 + ], + "angle": 0, + "content": "Spyros Matsoukas" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.713, + 0.746, + 0.726 + ], + "angle": 0, + "content": "Sravan Bodapati" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.727, + 0.802, + 0.74 + ], + "angle": 0, + "content": "Sri Harsha Reddy Wdaru" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.741, + 0.833, + 0.753 + ], + "angle": 0, + "content": "Sridevi Yagati Venkateshdatta" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.755, + 0.753, + 0.766 + ], + "angle": 0, + "content": "Srikanth Ronanki" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.768, + 0.815, + 0.78 + ], + "angle": 0, + "content": "Srinivasan R Veeravanallur" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.782, + 0.775, + 0.795 + ], + "angle": 0, + "content": "Sriram Venkatapathy" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.796, + 0.814, + 0.809 + ], + "angle": 0, + "content": "Sriramprabhu Sankaraguru" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.81, + 0.739, + 0.821 + ], + "angle": 0, + "content": "Sruthi Gorantla" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.824, + 0.736, + 0.835 + ], + "angle": 0, + "content": "Sruthi Karuturi" + }, + { + "type": 
"text", + "bbox": [ + 0.636, + 0.837, + 0.742, + 0.848 + ], + "angle": 0, + "content": "Stefan Schroedl" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.851, + 0.759, + 0.864 + ], + "angle": 0, + "content": "Subendhu Rongali" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.865, + 0.744, + 0.876 + ], + "angle": 0, + "content": "Subbasis Kundu" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.879, + 0.744, + 0.89 + ], + "angle": 0, + "content": "Suhaila Shakiah" + }, + { + "type": "text", + "bbox": [ + 0.636, + 0.892, + 0.729, + 0.903 + ], + "angle": 0, + "content": "Sukriti Tiwari" + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.937, + 0.505, + 0.948 + ], + "angle": 0, + "content": "46" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.372, + 0.043, + 0.621, + 0.058 + ], + "angle": 0, + "content": "The Amazon Nova Family of Models" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.092, + 0.204, + 0.105 + ], + "angle": 0, + "content": "Sumit Bharti" + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.107, + 0.203, + 0.119 + ], + "angle": 0, + "content": "Sumita Sami" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.12, + 0.222, + 0.133 + ], + "angle": 0, + "content": "Sumith Mathew" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.135, + 0.182, + 0.147 + ], + "angle": 0, + "content": "Sunny Yu" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.148, + 0.205, + 0.16 + ], + "angle": 0, + "content": "Sunwoo Kim" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.162, + 0.259, + 0.175 + ], + "angle": 0, + "content": "Suraj Bajirao Malode" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.176, + 0.265, + 0.189 + ], + "angle": 0, + "content": "Susana Cumplido Riel" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.189, + 0.212, + 0.202 + ], + "angle": 0, + "content": "Swapnil Palod" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.203, + 0.2, + 0.216 + ], + "angle": 0, + "content": "Swastik Roy" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.217, + 0.208, + 0.23 + ], + "angle": 0, + "content": "Syed Furqhan" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.231, + 0.23, + 0.244 + ], + "angle": 0, + "content": "Tagyoung Chung" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.245, + 0.235, + 0.257 + ], + "angle": 0, + "content": "Takuma Yoshitani" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.258, + 0.227, + 0.272 + ], + "angle": 0, + "content": "Taojiannan Yang" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.273, + 0.24, + 0.285 + ], + "angle": 0, + "content": "Tejaswi Chillakura" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.286, + 0.214, + 0.299 + ], + "angle": 0, + "content": "Tejwant Bajwa" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.3, + 0.22, + 0.313 + ], + "angle": 0, + "content": "Temi Lajumoke" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.314, + 0.192, + 0.326 + ], + "angle": 0, + "content": "Thanh Tran" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.328, + 0.23, + 0.339 + ], + "angle": 0, + "content": "Thomas Gueudre" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.341, + 0.205, + 0.354 + ], + "angle": 0, + "content": "Thomas Jung" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.355, + 0.186, + 0.366 + ], + "angle": 0, + "content": "Tianhui Li" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.369, + 0.212, + 0.38 + ], + "angle": 0, + "content": "Tim Seemman" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.382, + 0.215, + 0.395 + ], + "angle": 0, + "content": "Timothy Leffel" + }, + { + 
"type": "text", + "bbox": [ + 0.116, + 0.396, + 0.216, + 0.409 + ], + "angle": 0, + "content": "Tingting Xiang" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.41, + 0.185, + 0.422 + ], + "angle": 0, + "content": "Tirth Patel" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.424, + 0.22, + 0.435 + ], + "angle": 0, + "content": "Tobias Domhan" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.437, + 0.2, + 0.449 + ], + "angle": 0, + "content": "Tobias Falke" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.451, + 0.182, + 0.464 + ], + "angle": 0, + "content": "Toby Guo" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.466, + 0.166, + 0.477 + ], + "angle": 0, + "content": "Tom Li" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.479, + 0.254, + 0.491 + ], + "angle": 0, + "content": "Tomasz Horsczaruk" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.492, + 0.225, + 0.505 + ], + "angle": 0, + "content": "Tomasz Jedynak" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.506, + 0.223, + 0.519 + ], + "angle": 0, + "content": "Tushar Kulkarni" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.52, + 0.189, + 0.532 + ], + "angle": 0, + "content": "Tyst Marin" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.534, + 0.216, + 0.547 + ], + "angle": 0, + "content": "Tytus Metrycki" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.548, + 0.215, + 0.561 + ], + "angle": 0, + "content": "Tzu-Yen Wang" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.562, + 0.196, + 0.575 + ], + "angle": 0, + "content": "Umang Jain" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.576, + 0.215, + 0.588 + ], + "angle": 0, + "content": "Upendra Singh" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.589, + 0.232, + 0.601 + ], + "angle": 0, + "content": "Utkarsh Chirimar" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.602, + 0.215, + 0.616 + ], + "angle": 0, + "content": "Vaibhav Gupta" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.617, + 0.202, + 0.628 + ], + "angle": 0, + "content": "Vanshil Shah" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.63, + 0.232, + 0.643 + ], + "angle": 0, + "content": "Varad Deshpande" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.644, + 0.203, + 0.657 + ], + "angle": 0, + "content": "Varad Gunjal" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.658, + 0.236, + 0.67 + ], + "angle": 0, + "content": "Varsha Srikeshava" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.671, + 0.204, + 0.683 + ], + "angle": 0, + "content": "Varsha Vivek" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.685, + 0.23, + 0.699 + ], + "angle": 0, + "content": "Varun Bharadwaj" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.7, + 0.207, + 0.712 + ], + "angle": 0, + "content": "Varun Gangal" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.713, + 0.205, + 0.725 + ], + "angle": 0, + "content": "Varun Kumar" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.727, + 0.234, + 0.74 + ], + "angle": 0, + "content": "Venkatesh Elango" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.741, + 0.227, + 0.752 + ], + "angle": 0, + "content": "Vicente Ordonez" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.754, + 0.192, + 0.765 + ], + "angle": 0, + "content": "Victor Soto" + }, + { + "type": "text", + "bbox": [ + 0.375, + 0.092, + 0.533, + 0.106 + ], + "angle": 0, + "content": "Vignesh Radhakrishnan" + }, + { + "type": "text", + "bbox": [ + 0.375, + 0.107, + 0.461, + 0.119 + ], + "angle": 0, + "content": "Vihang Patel" + }, + { + "type": "text", + "bbox": 
[ + 0.375, + 0.12, + 0.468, + 0.134 + ], + "angle": 0, + "content": "Vikram Singh" + }, + { + "type": "text", + "bbox": [ + 0.375, + 0.135, + 0.545, + 0.147 + ], + "angle": 0, + "content": "Vinay Varma Kolanuvada" + }, + { + "type": "text", + "bbox": [ + 0.375, + 0.148, + 0.588, + 0.161 + ], + "angle": 0, + "content": "Vinayshekhar Bannihatti Kumar" + }, + { + "type": "text", + "bbox": [ + 0.375, + 0.162, + 0.481, + 0.175 + ], + "angle": 0, + "content": "Vincent Auvray" + }, + { + "type": "text", + "bbox": [ + 0.375, + 0.176, + 0.492, + 0.188 + ], + "angle": 0, + "content": "Vincent Cartillier" + }, + { + "type": "text", + "bbox": [ + 0.375, + 0.189, + 0.474, + 0.201 + ], + "angle": 0, + "content": "Vincent Ponzo" + }, + { + "type": "text", + "bbox": [ + 0.375, + 0.203, + 0.454, + 0.216 + ], + "angle": 0, + "content": "Violet Peng" + }, + { + "type": "text", + "bbox": [ + 0.375, + 0.217, + 0.502, + 0.229 + ], + "angle": 0, + "content": "Vishal Khandelwal" + }, + { + "type": "text", + "bbox": [ + 0.375, + 0.23, + 0.455, + 0.242 + ], + "angle": 0, + "content": "Vishal Naik" + }, + { + "type": "text", + "bbox": [ + 0.375, + 0.244, + 0.531, + 0.256 + ], + "angle": 0, + "content": "Vishvesh Sahasrabudhe" + }, + { + "type": "text", + "bbox": [ + 0.375, + 0.258, + 0.478, + 0.271 + ], + "angle": 0, + "content": "Vitaliy Korolev" + }, + { + "type": "text", + "bbox": [ + 0.375, + 0.272, + 0.49, + 0.284 + ], + "angle": 0, + "content": "Vivek Gokuladas" + }, + { + "type": "text", + "bbox": [ + 0.375, + 0.286, + 0.465, + 0.298 + ], + "angle": 0, + "content": "Vivek Madan" + }, + { + "type": "text", + "bbox": [ + 0.375, + 0.3, + 0.505, + 0.311 + ], + "angle": 0, + "content": "Vivek Subramanian" + }, + { + "type": "text", + "bbox": [ + 0.375, + 0.313, + 0.475, + 0.326 + ], + "angle": 0, + "content": "Volkan Cevher" + }, + { + "type": "text", + "bbox": [ + 0.375, + 0.327, + 0.466, + 0.34 + ], + "angle": 0, + "content": "Vrinda Gupta" + }, + { + "type": "text", + "bbox": [ + 0.375, + 0.341, + 0.461, + 0.353 + ], + "angle": 0, + "content": "Wael Hamza" + }, + { + "type": "text", + "bbox": [ + 0.375, + 0.355, + 0.449, + 0.367 + ], + "angle": 0, + "content": "Wei Zhang" + }, + { + "type": "text", + "bbox": [ + 0.375, + 0.369, + 0.471, + 0.381 + ], + "angle": 0, + "content": "Weitong Ruan" + }, + { + "type": "text", + "bbox": [ + 0.375, + 0.382, + 0.474, + 0.395 + ], + "angle": 0, + "content": "Weiwei Cheng" + }, + { + "type": "text", + "bbox": [ + 0.375, + 0.396, + 0.453, + 0.409 + ], + "angle": 0, + "content": "Wen Zhang" + }, + { + "type": "text", + "bbox": [ + 0.375, + 0.41, + 0.46, + 0.422 + ], + "angle": 0, + "content": "Wenbo Zhao" + }, + { + "type": "text", + "bbox": [ + 0.375, + 0.424, + 0.46, + 0.436 + ], + "angle": 0, + "content": "Wenyan Yao" + }, + { + "type": "text", + "bbox": [ + 0.375, + 0.438, + 0.494, + 0.451 + ], + "angle": 0, + "content": "Wenzhuo Ouyang" + }, + { + "type": "text", + "bbox": [ + 0.375, + 0.452, + 0.483, + 0.464 + ], + "angle": 0, + "content": "Wesley Dashner" + }, + { + "type": "text", + "bbox": [ + 0.375, + 0.465, + 0.498, + 0.478 + ], + "angle": 0, + "content": "William Campbell" + }, + { + "type": "text", + "bbox": [ + 0.375, + 0.479, + 0.457, + 0.49 + ], + "angle": 0, + "content": "William Lin" + }, + { + "type": "text", + "bbox": [ + 0.375, + 0.492, + 0.474, + 0.504 + ], + "angle": 0, + "content": "Willian Martin" + }, + { + "type": "text", + "bbox": [ + 0.375, + 0.506, + 0.469, + 0.519 + ], + "angle": 0, + "content": "Wyatt Pearson" + }, + { + "type": "text", + 
"bbox": [ + 0.375, + 0.52, + 0.456, + 0.533 + ], + "angle": 0, + "content": "Xiang Jiang" + }, + { + "type": "text", + "bbox": [ + 0.375, + 0.534, + 0.469, + 0.547 + ], + "angle": 0, + "content": "Xiangxing Lu" + }, + { + "type": "text", + "bbox": [ + 0.375, + 0.548, + 0.474, + 0.561 + ], + "angle": 0, + "content": "Xiangyang Shi" + }, + { + "type": "text", + "bbox": [ + 0.375, + 0.562, + 0.471, + 0.575 + ], + "angle": 0, + "content": "Xianwen Peng" + }, + { + "type": "text", + "bbox": [ + 0.375, + 0.576, + 0.469, + 0.588 + ], + "angle": 0, + "content": "Xiaofeng Gao" + }, + { + "type": "text", + "bbox": [ + 0.375, + 0.589, + 0.463, + 0.602 + ], + "angle": 0, + "content": "Xiaoge Jiang" + }, + { + "type": "text", + "bbox": [ + 0.375, + 0.603, + 0.458, + 0.615 + ], + "angle": 0, + "content": "Xiaohan Fei" + }, + { + "type": "text", + "bbox": [ + 0.375, + 0.617, + 0.472, + 0.629 + ], + "angle": 0, + "content": "Xiaohui Wang" + }, + { + "type": "text", + "bbox": [ + 0.375, + 0.63, + 0.513, + 0.643 + ], + "angle": 0, + "content": "Xiaozhou Joey Zhou" + }, + { + "type": "text", + "bbox": [ + 0.375, + 0.644, + 0.438, + 0.657 + ], + "angle": 0, + "content": "Xin Feng" + }, + { + "type": "text", + "bbox": [ + 0.375, + 0.659, + 0.462, + 0.671 + ], + "angle": 0, + "content": "Xinyan Zhao" + }, + { + "type": "text", + "bbox": [ + 0.375, + 0.672, + 0.466, + 0.685 + ], + "angle": 0, + "content": "Xinyao Wang" + }, + { + "type": "text", + "bbox": [ + 0.375, + 0.686, + 0.436, + 0.699 + ], + "angle": 0, + "content": "Xinyu Li" + }, + { + "type": "text", + "bbox": [ + 0.375, + 0.7, + 0.443, + 0.713 + ], + "angle": 0, + "content": "Xu Zhang" + }, + { + "type": "text", + "bbox": [ + 0.375, + 0.714, + 0.454, + 0.726 + ], + "angle": 0, + "content": "Xuan Wang" + }, + { + "type": "text", + "bbox": [ + 0.375, + 0.727, + 0.447, + 0.739 + ], + "angle": 0, + "content": "Xuandi Fu" + }, + { + "type": "text", + "bbox": [ + 0.375, + 0.74, + 0.468, + 0.754 + ], + "angle": 0, + "content": "Xueling Yuan" + }, + { + "type": "text", + "bbox": [ + 0.375, + 0.755, + 0.468, + 0.768 + ], + "angle": 0, + "content": "Xuning Wang" + }, + { + "type": "text", + "bbox": [ + 0.635, + 0.092, + 0.756, + 0.105 + ], + "angle": 0, + "content": "Yadunandana Rao" + }, + { + "type": "text", + "bbox": [ + 0.635, + 0.107, + 0.721, + 0.119 + ], + "angle": 0, + "content": "Yair Tavizon" + }, + { + "type": "text", + "bbox": [ + 0.635, + 0.12, + 0.737, + 0.134 + ], + "angle": 0, + "content": "Yan Rossiytsev" + }, + { + "type": "text", + "bbox": [ + 0.635, + 0.135, + 0.721, + 0.146 + ], + "angle": 0, + "content": "Yanbei Chen" + }, + { + "type": "text", + "bbox": [ + 0.635, + 0.148, + 0.698, + 0.161 + ], + "angle": 0, + "content": "Yang Liu" + }, + { + "type": "text", + "bbox": [ + 0.635, + 0.162, + 0.701, + 0.175 + ], + "angle": 0, + "content": "Yang Zou" + }, + { + "type": "text", + "bbox": [ + 0.635, + 0.176, + 0.735, + 0.188 + ], + "angle": 0, + "content": "Yangsook Park" + }, + { + "type": "text", + "bbox": [ + 0.635, + 0.189, + 0.744, + 0.203 + ], + "angle": 0, + "content": "Yannick Versley" + }, + { + "type": "text", + "bbox": [ + 0.635, + 0.204, + 0.732, + 0.216 + ], + "angle": 0, + "content": "Yanyan Zhang" + }, + { + "type": "text", + "bbox": [ + 0.635, + 0.217, + 0.706, + 0.229 + ], + "angle": 0, + "content": "Yash Patel" + }, + { + "type": "text", + "bbox": [ + 0.635, + 0.23, + 0.733, + 0.244 + ], + "angle": 0, + "content": "Yen-Cheng Lu" + }, + { + "type": "text", + "bbox": [ + 0.635, + 0.245, + 0.681, + 0.256 + ], + "angle": 0, + 
"content": "Yi Pan" + }, + { + "type": "text", + "bbox": [ + 0.635, + 0.258, + 0.776, + 0.271 + ], + "angle": 0, + "content": "Yi-Hsiang (Sean) Lai" + }, + { + "type": "text", + "bbox": [ + 0.635, + 0.272, + 0.708, + 0.284 + ], + "angle": 0, + "content": "Yichen Hu" + }, + { + "type": "text", + "bbox": [ + 0.635, + 0.286, + 0.71, + 0.299 + ], + "angle": 0, + "content": "Yida Wang" + }, + { + "type": "text", + "bbox": [ + 0.635, + 0.3, + 0.723, + 0.313 + ], + "angle": 0, + "content": "Yiheng Zhou" + }, + { + "type": "text", + "bbox": [ + 0.635, + 0.314, + 0.714, + 0.327 + ], + "angle": 0, + "content": "Yilin Xiang" + }, + { + "type": "text", + "bbox": [ + 0.635, + 0.328, + 0.695, + 0.34 + ], + "angle": 0, + "content": "Ying Shi" + }, + { + "type": "text", + "bbox": [ + 0.635, + 0.342, + 0.71, + 0.354 + ], + "angle": 0, + "content": "Ying Wang" + }, + { + "type": "text", + "bbox": [ + 0.635, + 0.355, + 0.738, + 0.367 + ], + "angle": 0, + "content": "Yishai Galatzer" + }, + { + "type": "text", + "bbox": [ + 0.635, + 0.369, + 0.734, + 0.382 + ], + "angle": 0, + "content": "Yongxin Wang" + }, + { + "type": "text", + "bbox": [ + 0.635, + 0.383, + 0.717, + 0.395 + ], + "angle": 0, + "content": "Yorick Shen" + }, + { + "type": "text", + "bbox": [ + 0.635, + 0.396, + 0.715, + 0.408 + ], + "angle": 0, + "content": "Yuchen Sun" + }, + { + "type": "text", + "bbox": [ + 0.635, + 0.41, + 0.746, + 0.422 + ], + "angle": 0, + "content": "Yudi Purwatama" + }, + { + "type": "text", + "bbox": [ + 0.635, + 0.424, + 0.731, + 0.436 + ], + "angle": 0, + "content": "Yue (Rex) Wu" + }, + { + "type": "text", + "bbox": [ + 0.635, + 0.438, + 0.686, + 0.449 + ], + "angle": 0, + "content": "Yue Gu" + }, + { + "type": "text", + "bbox": [ + 0.635, + 0.451, + 0.736, + 0.465 + ], + "angle": 0, + "content": "Yuechun Wang" + }, + { + "type": "text", + "bbox": [ + 0.635, + 0.466, + 0.713, + 0.479 + ], + "angle": 0, + "content": "Yujun Zeng" + }, + { + "type": "text", + "bbox": [ + 0.635, + 0.48, + 0.734, + 0.491 + ], + "angle": 0, + "content": "Yuncong Chen" + }, + { + "type": "text", + "bbox": [ + 0.635, + 0.492, + 0.718, + 0.504 + ], + "angle": 0, + "content": "Yunke Zhou" + }, + { + "type": "text", + "bbox": [ + 0.635, + 0.506, + 0.721, + 0.519 + ], + "angle": 0, + "content": "Yusheng Xie" + }, + { + "type": "text", + "bbox": [ + 0.635, + 0.52, + 0.704, + 0.533 + ], + "angle": 0, + "content": "Yvon Guy" + }, + { + "type": "text", + "bbox": [ + 0.635, + 0.534, + 0.789, + 0.547 + ], + "angle": 0, + "content": "Zbigniew Ambrozinski" + }, + { + "type": "text", + "bbox": [ + 0.635, + 0.548, + 0.721, + 0.56 + ], + "angle": 0, + "content": "Zhaowei Cai" + }, + { + "type": "text", + "bbox": [ + 0.635, + 0.562, + 0.718, + 0.575 + ], + "angle": 0, + "content": "Zhen Zhang" + }, + { + "type": "text", + "bbox": [ + 0.635, + 0.576, + 0.721, + 0.589 + ], + "angle": 0, + "content": "Zheng Wang" + }, + { + "type": "text", + "bbox": [ + 0.635, + 0.59, + 0.724, + 0.602 + ], + "angle": 0, + "content": "Zhenghui Jin" + }, + { + "type": "text", + "bbox": [ + 0.635, + 0.603, + 0.724, + 0.615 + ], + "angle": 0, + "content": "Zhewei Zhao" + }, + { + "type": "text", + "bbox": [ + 0.635, + 0.617, + 0.711, + 0.629 + ], + "angle": 0, + "content": "Zhiheng Li" + }, + { + "type": "text", + "bbox": [ + 0.635, + 0.63, + 0.723, + 0.643 + ], + "angle": 0, + "content": "Zhiheng Luo" + }, + { + "type": "text", + "bbox": [ + 0.635, + 0.645, + 0.738, + 0.658 + ], + "angle": 0, + "content": "Zhikang Zhang" + }, + { + "type": "text", + "bbox": [ + 0.635, + 0.659, 
+ 0.715, + 0.671 + ], + "angle": 0, + "content": "Zhilin Fang" + }, + { + "type": "text", + "bbox": [ + 0.635, + 0.672, + 0.697, + 0.684 + ], + "angle": 0, + "content": "Zhiqi Bu" + }, + { + "type": "text", + "bbox": [ + 0.635, + 0.686, + 0.734, + 0.699 + ], + "angle": 0, + "content": "Zhiyuan Wang" + }, + { + "type": "text", + "bbox": [ + 0.635, + 0.7, + 0.72, + 0.712 + ], + "angle": 0, + "content": "Zhizhong Li" + }, + { + "type": "text", + "bbox": [ + 0.635, + 0.713, + 0.72, + 0.726 + ], + "angle": 0, + "content": "Zijian Wang" + }, + { + "type": "text", + "bbox": [ + 0.635, + 0.727, + 0.768, + 0.74 + ], + "angle": 0, + "content": "Zimeng (Chris) Qiu" + }, + { + "type": "text", + "bbox": [ + 0.635, + 0.741, + 0.691, + 0.752 + ], + "angle": 0, + "content": "Zishi Li" + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.789, + 0.293, + 0.804 + ], + "angle": 0, + "content": "D.2 Acknowledgements" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.813, + 0.884, + 0.843 + ], + "angle": 0, + "content": "We would like to acknowledge the following individuals who supported the development of the Nova models and services during the Nova program." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.857, + 0.264, + 0.871 + ], + "angle": 0, + "content": "Abdelrahman Badawy" + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.872, + 0.224, + 0.884 + ], + "angle": 0, + "content": "Abtin Rasoulian" + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.885, + 0.239, + 0.898 + ], + "angle": 0, + "content": "Adam Baranowski" + }, + { + "type": "text", + "bbox": [ + 0.375, + 0.857, + 0.484, + 0.871 + ], + "angle": 0, + "content": "Aishwarya Kore" + }, + { + "type": "text", + "bbox": [ + 0.375, + 0.872, + 0.538, + 0.885 + ], + "angle": 0, + "content": "Aishwarya Padmakumar" + }, + { + "type": "text", + "bbox": [ + 0.375, + 0.886, + 0.451, + 0.897 + ], + "angle": 0, + "content": "Alain Krok" + }, + { + "type": "text", + "bbox": [ + 0.635, + 0.857, + 0.718, + 0.87 + ], + "angle": 0, + "content": "Alex Mould" + }, + { + "type": "text", + "bbox": [ + 0.635, + 0.872, + 0.699, + 0.884 + ], + "angle": 0, + "content": "Alex Sun" + }, + { + "type": "text", + "bbox": [ + 0.635, + 0.885, + 0.787, + 0.9 + ], + "angle": 0, + "content": "Alexandros Papangelis" + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "47" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.372, + 0.043, + 0.621, + 0.058 + ], + "angle": 0, + "content": "The Amazon Nova Family of Models" + }, + { + "type": "table", + "bbox": [ + 0.113, + 0.092, + 0.322, + 0.755 + ], + "angle": 0, + "content": "
Alfred Shen
Amaran Asokkumar
Amiya Chakraborty
Anastasios Alexandridis
Angeliki Metallinou
Anila Joshi
Anup Katariya
Arda Keskiner
Avinash Venkatagiri
Aya Elzoheiry
Baishali Chaudhury
Ben Friebe
Bigad Soleiman
Bob Li
Brad Porter
Brian Chou
Brian Yost
Burak Gozluklu
Chad Connally
Chris Azer
Chris Beauchene
Chris Greenwood
Chris Johnson
Clay Cheng
Craig Rowland
Di Jin
Di Wu
Diego Socolinsky
Don Kretsch
Dylan Martin
Emma Lister
Eva Lasarcyk
Evan Kravitz
Federico D'Alessio
Flora Wang
Francisco Calderon Rodriguez
Gamaleldin Elsayed
Gaurav Rele
Gaurav Sukhatme
Gourav Datta
Hadrien Glaude
Hanbo Wang
Hans Hoeijmaker
Haotian An
Harpreet Cheema
Harshit Pande
Hongbin Zheng
Huda Khayrallah
" + }, + { + "type": "table", + "bbox": [ + 0.375, + 0.092, + 0.514, + 0.753 + ], + "angle": 0, + "content": "
Isaac Privitera
Jacob Zhiyuan Fang
Jady Liu
Jae Oh Woo
Jamal Saboune
James Park
Jianbo Yuan
Jianwei Feng
Jie Li
Jinwoo Park
Johan Esbjourner
Jonathan makunga
JoonHyung Kim
Jorge Beltran
Jose Garrido Ramas
Julie Baca
Justin Lewis
Kamran Razi
Kangyan Liu
Kasana Mahesh
Kelvin Qian
Kyle Goehner
Kyle Saggar
Laith Al-Saadoon
Lei Sun
Lily Liao
Long Chen
Lukacs Ablonczy
Luke Luneau
Maciej Eichler
Mallory McManamo
Manju Arakere
Matt McCoy
Matthew Chang
Meghal Varia
Meghana Ashok
Melanie Li
Mifu Suzuki
Negin Sokhandan
Nick Biso
Nico Bishop
Nicolle Borges
Palash Goyal
Parker Coleman
Paul Sumarokov
Pavel Kveton
Philipp Lerche
Pratibha Kumari
" + }, + { + "type": "table", + "bbox": [ + 0.635, + 0.092, + 0.822, + 0.727 + ], + "angle": 0, + "content": "
Rahul Agarwal
Rahul Ghosh
Rahul Kulkarni
Raj Kumar
Ramana Keerthi
Rams Sundaram
Raymond Fang
Reethika Kesani
Ryan Razkenari
Sarath Krishnan
Scott Patten
Seokhwan Kim
Sepehr Eghbali
Sergey Pugachev
Sertan Alkan
Shailav Taneja
Sheamus Punch
Shikib Mehri
Shilpa Singh
Shraddha Ravishankar
Sijia Liu
Sitanshu Gupta
Sol Vesdapunt
Spencer Romo
Sravya Uppu
Srivani Kambhampati
Stephanie Xie
Sujitha Martin
Sungjin Lee
Sungmin Hong
Tanner McRae
Thomas Patterson
Tina Li
Tom Liang
Trong Nguyen
Vasudev Mahesh Purandare
Vidya Sagar Ravipati
Vu San Ha Huynh
Weijuan Wu
Xiaolong Li
Xinyi Xu
Yaroslav Nechaev
Yuan Tian
Yunfei Bai
Zach Hille
Ziyan Tian
" + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "48" + } + ] +] \ No newline at end of file diff --git a/data/2025/2506_12xxx/2506.12103/2704209e-62a6-4e6a-a67a-5d895054a41f_origin.pdf b/data/2025/2506_12xxx/2506.12103/2704209e-62a6-4e6a-a67a-5d895054a41f_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..55f68cdea1c5a75a9069ba408161582c9916f48f --- /dev/null +++ b/data/2025/2506_12xxx/2506.12103/2704209e-62a6-4e6a-a67a-5d895054a41f_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5c73ccbeacb6d721319fc71de2660a7540eba591b0a39f3cb3fb19b49d1d8f97 +size 20999633 diff --git a/data/2025/2506_12xxx/2506.12103/full.md b/data/2025/2506_12xxx/2506.12103/full.md new file mode 100644 index 0000000000000000000000000000000000000000..15851eeaf8f16f9db990a2d1bdf46a5195263d3f --- /dev/null +++ b/data/2025/2506_12xxx/2506.12103/full.md @@ -0,0 +1,2705 @@ +# The Amazon Nova Family of Models: Technical Report and Model Card + +![](images/3156a9077f1c972bfe8d4f5736cc7cb801a543c0a7e1872ae7041bb75bf072ce.jpg) +Figure 1: The Amazon Nova family of models + +![](images/1888da7922d07148b8348987b53dfc4837e982a06f12992d09585bdf6e01d4e8.jpg) +Amazon Artificial General Intelligence + +![](images/6eb562a9d279dd55a2a329e5996f1b4fa88c3aa97c915eefdfe3c4fad694eb2a.jpg) + +# Abstract + +We present Amazon Nova, a new generation of state-of-the-art foundation models that deliver frontier intelligence and industry-leading price performance. Amazon Nova Pro is a highly-capable multimodal model with the best combination of accuracy, speed, and cost for a wide range of tasks. Amazon Nova Lite is a low-cost multimodal model that is lightning fast for processing images, video, documents and text. Amazon Nova Micro is a text-only model that delivers our lowest-latency responses at very low cost. Amazon Nova Canvas is an image generation model that creates professional grade images with rich customization controls. Amazon Nova Reel is a video generation model offering high-quality outputs, customization, and motion control. Our models were built responsibly and with a commitment to customer trust, security, and reliability. We report benchmarking results for core capabilities, agentic performance, long context, functional adaptation, runtime performance, and human evaluation. 
# Contents

1 Introduction
  1.1 Amazon Nova Pro, Lite, and Micro
  1.2 Amazon Nova Canvas and Reel
2 Amazon Nova Pro, Lite, and Micro Evaluations
  2.1 Core capability public benchmarks
    2.1.1 Core capability text benchmarks and results
    2.1.2 Core capability multimodal benchmarks and results
  2.2 Agentic workflows
    2.2.1 Agentic text benchmarks and results
    2.2.2 Agentic multimodal benchmarks and results
  2.3 Long context
  2.4 Functional expertise
    2.4.1 Software engineering
    2.4.2 Financial analysis
    2.4.3 Retrieval augmented generation
  2.5 Runtime performance
3 Amazon Nova Canvas Evaluation
  3.1 Automated metrics
  3.2 Human evaluation
4 Amazon Nova Reel Evaluation
  4.1 Human evaluation metrics
  4.2 Dataset
  4.3 Implementation details & results
5 Responsible AI
  5.1 Defining our RAI objectives
  5.2 Ensuring adherence to RAI objectives
  5.3 RAI Evaluation
  5.4 Red Teaming
    5.4.1 Internal Red Teaming
    5.4.2 External Red Teaming
    5.4.3 Automated Red Teaming
6 Training Infrastructure
A Amazon Nova Canvas Capabilities
B Prompts and Scoring
C Qualitative examples of multimodal intelligence
D Correspondence and Contributors

# 1 Introduction

This document introduces Amazon Nova, a new generation of state-of-the-art foundation models that deliver frontier intelligence and industry-leading price performance.

# 1.1 Amazon Nova Pro, Lite, and Micro

Key capabilities of Amazon Nova Pro, Lite, and Micro include:

- Frontier intelligence: Amazon Nova models possess frontier intelligence, enabling them to understand and process complex language tasks with state-of-the-art accuracy. Amazon Nova Micro sets new standards in its intelligence tier in several text benchmarks such as Language Understanding (MMLU), Deep Reasoning (GPQA), Mathematics (MATH), and Multi-step Reasoning (Big-Bench Hard). Our multimodal models, Amazon Nova Pro and Lite, take text, images, documents, and video as input and generate text as output. These models set standards in several benchmarks such as Video Captioning (VATEX), Visual QA (TextVQA), Function Calling (BFCL), and multimodal agentic benchmarks (GroundUI-1K, VisualWebBench, Mind2Web) in their respective intelligence tiers. These models are the first to offer video understanding capabilities on Amazon Bedrock, enabling deeper insights from multimedia content.
- Speed: Amazon Nova has been designed for fast inference, with Amazon Nova Micro, Lite, and Pro each being one of the fastest models in their respective intelligence tiers.
- Agentic Workflows: Amazon Nova Pro, Lite, and Micro can power AI agents capable of breaking down and executing multi-step tasks. These models are integrated with Bedrock Knowledge Bases and they excel at retrieval-augmented generation (RAG) to ensure the best accuracy by grounding their responses to the developer's data.
- Customizability: Developers can fine-tune these models with multimodal data (Pro and Lite) or text data (Pro, Lite, and Micro), providing the flexibility to achieve desired accuracy, latency, and cost. Developers can also run self-service Custom Fine-Tuning (CFT) and distillation of larger models to smaller ones via Bedrock APIs.
- Price-Performance: Each model was optimized to deliver exceptional price-performance value, offering state-of-the-art performance on key benchmarks at low cost.
Amazon Nova Pro, Lite, and Micro are based on the Transformer architecture [74]. Each model went through a series of training processes that began with pretraining using a mixture of large amounts of multilingual and multimodal data. Our models were trained on data from a variety of sources, including licensed data, proprietary data, open source datasets, and publicly available data where appropriate. We curated data from over 200 languages, with particular emphasis on Arabic, Dutch, English, French, German, Hebrew, Hindi, Italian, Japanese, Korean, Portuguese, Russian, Simplified Chinese, Spanish, and Turkish. After pretraining, models iteratively went through a series of fine-tuning stages, including Supervised Fine-Tuning (SFT) on instruction-demonstration pairs (including multimodal ones) and reward model (RM) training from human preference data [59]. Finally, the models learned from human preferences via methods like Direct Preference Optimization (DPO) [62] and Proximal Policy Optimization (PPO) [68] to ensure that the final models are aligned with human preferences in both quality and responsibility.

# 1.2 Amazon Nova Canvas and Reel

Amazon Nova Canvas and Amazon Nova Reel are designed to create realistic multimodal content, including images and videos, for a wide range of applications such as advertising, marketing, and entertainment.

Amazon Nova Canvas offers the following functionalities, with more details provided in Appendix A:

- Text-to-image generation: Amazon Nova Canvas can generate images with various resolutions (from 512 up to 2K horizontal resolution) and aspect ratios (any aspect ratio between 1:4 and 4:1 with a maximum of 4.2M pixels). Customers can provide reference images to guide the model to generate outputs in a specific style or color palette, or to generate variations of an image.
- Image editing: Amazon Nova Canvas allows precise image editing operations like inpainting and outpainting through natural language mask prompts. These mask prompts describe the specific area of the input image that needs to be repaired. The user can also easily change a background with the background removal feature, leaving the subject of the image unchanged.

Amazon Nova Reel offers the following functionalities:

- Generate videos from a text prompt: Amazon Nova Reel can generate high-quality videos of 6-second duration (720p resolution at 24 frames per second) from a text prompt.
- Generate videos from a reference image and a prompt: Amazon Nova Reel brings images to life and generates videos that are guided by the input image and a text prompt.
- Camera motion control using a text prompt: With camera motion control in Amazon Nova Reel, the user can guide camera motion with text prompts like "zoom" and "dolly forward" to get the exact visual needed for each video. Amazon Nova Reel supports more than 20 camera motions. For more details, please refer to our prompting guide.

Amazon Nova Canvas and Reel are latent diffusion models [61] where a Variational AutoEncoder (VAE) [41] maps the image or video frames to latent variables on which the diffusion process happens. A text encoder tokenizes input text prompts into tokens, which are then passed to the diffusion model as a conditioning signal. At inference time, a latent variable is initialized with random noise sampled from a Gaussian distribution, which is then denoised by the trained diffusion model iteratively into a clean latent variable.
The clean latent variable is decoded back to images or video frames by the decoder of the VAE. Both models underwent a two-phased approach of pretraining and fine-tuning. Pretraining data were drawn from a variety of sources, including licensed data, proprietary data, open source datasets, and publicly available data where appropriate. Our highly scalable data filtering, deduplication, and enrichment pipelines were based on AWS EMR [2] and AWS Batch [1], as well as other AWS services.

# 2 Amazon Nova Pro, Lite, and Micro Evaluations

In this section, we report benchmarking results for Amazon Nova models and for select publicly-available models, including by citing existing public results and by measuring their performance. In cases for which the result is a simple average of binary scores, we assume a Gaussian distribution for the sample and approximate the $95\%$ confidence interval as:

$$
CI(S) = 1.96 \times \sqrt{\frac{S \times (1 - S)}{N}} \tag{1}
$$

where $CI$ is the $95\%$ confidence interval, $S$ is the measured score for the benchmark, and $N$ is the sample size [48, 45]. A worked example follows the benchmark descriptions below.

# 2.1 Core capability public benchmarks

We evaluate Amazon Nova models on a suite of automated public benchmarks to assess core capabilities, including for both text-only (Section 2.1.1) and multimodal (Section 2.1.2) use cases.

# 2.1.1 Core capability text benchmarks and results

We evaluate select core capabilities of Amazon Nova models on a variety of public text-only benchmarks, spanning general knowledge, reasoning, language understanding, multilinguality, and instruction following.

The following list briefly describes our selected text-only benchmarks. The prompts used for evaluation of each benchmark are summarized in Appendix B.1.

- MMLU [36]: Massive Multitask Language Understanding (MMLU) is a multiple-choice question answering benchmark that covers 57 subject areas across STEM, humanities, and social sciences. Subjects include law, physics, mathematics, computer science, history, and more. The difficulty levels vary from elementary level to advanced professional level, focusing on both world knowledge and problem solving abilities. We use 0-shot Chain-of-Thought (CoT) [79] for prompting and report the macro average exact match accuracy across all subjects.
- ARC-C [22]: The AI2 Reasoning Challenge (ARC) is a multiple-choice question-answering dataset, which contains science questions from grade 3 to grade 9 exams. We use 0-shot CoT for prompting and report exact match accuracy.
- DROP [26]: Discrete Reasoning Over Paragraphs (DROP) is a crowdsourced reading comprehension dataset that requires reasoning and operating over multiple input positions from the reference text. We use 0-shot CoT for prompting and report F1 score.
- GPQA [64]: Graduate-level Google-Proof Question and Answering (GPQA) is a challenging and high-quality multiple-choice question answering benchmark written by domain experts who have or are pursuing PhDs in biology, physics, and chemistry. We use 0-shot CoT for prompting and report exact match accuracy on the main set.
- MATH [37]: MATH is a mathematics problem solving benchmark, consisting of problems from mathematics competitions including the American Mathematics Competitions (AMC 10 and AMC 12), the American Invitational Mathematics Examination (AIME) and more. We use 0-shot CoT for prompting and report the exact match accuracy on the MATH5k set.
- GSM8K [23]: Grade School Math 8K (GSM8K) is a math benchmark consisting of 8,500 high-quality and diverse grade school math problems. The benchmark tests basic mathematical problem solving capabilities that require multi-step reasoning. We use 0-shot CoT for prompting and report the exact match accuracy on the test set containing 1,319 samples.
- IFEval [89]: IFEval is an instruction-following benchmark that evaluates a model's capability to follow "verifiable instructions" such as "mention the keyword of AI at least 3 times". The dataset contains 25 types of verifiable instructions across 541 prompts in total, where each prompt contains one or more verifiable instructions in natural language. We report the instruction-level accuracy under loose constraints.
- BBH [72]: Big Bench Hard (BBH) is a diverse benchmark consisting of an aggregate of 23 subjects that cover algorithmic and NLP tasks ranging from causal and logical reasoning to word sorting and movie recommendations. The tasks include both multiple-choice and open-generation formats. We report the macro average exact match accuracy across the subjects.
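As a quick check of Equation 1, the sketch below reproduces one of the reported intervals; the score (94.8%) and the sample size (the 1,319-example GSM8K test set) come from Table 1 and the GSM8K description above.

```python
import math

def ci95(score: float, n: int) -> float:
    """Half-width of the 95% confidence interval from Equation 1."""
    return 1.96 * math.sqrt(score * (1.0 - score) / n)

# Nova Pro scores 94.8% on GSM8K's 1,319-sample test set (Table 1);
# Equation 1 gives the +/-1.2-point interval reported there.
print(round(100 * ci95(0.948, 1319), 1))  # -> 1.2
```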
| Model | tok/sec | MMLU (accuracy) | ARC-C (accuracy) | DROP (F1-score) | GPQA (accuracy) | MATH (accuracy) | GSM8K (accuracy) | IFEval (instruction-level loose accuracy) | BBH (accuracy) |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| Nova Pro | 100 | 85.9 | 94.8 ±1.3 | 85.4 ±0.7 | 46.9 ±4.6 | 76.6 ±1.2 | 94.8 ±1.2 | 92.1 ±1.8 | 86.9 |
| Nova Lite | 157 | 80.5 | 92.4 ±1.5 | 80.2 ±0.8 | 42.0 ±4.6 | 73.3 ±1.2 | 94.5 ±1.2 | 89.7 ±2.1 | 82.4 |
| Nova Micro | 210 | 77.6 | 90.2 ±1.7 | 79.3 ±0.8 | 40.0 ±4.5 | 69.3 ±1.3 | 92.3 ±1.4 | 87.2 ±2.3 | 79.5 |
| *(prompting)* |  | 0-shot CoT | 0-shot | 6-shot CoT | 0-shot CoT | 0-shot CoT | 0-shot CoT | 0-shot | 3-shot CoT |
| Claude 3.5 Sonnet (Oct) | 57 | 89.3 | 96.3^M ±1.1 | 88.3 ±0.6 | 58.0^M ±4.6 | 78.3 ±1.1 | 96.5^M ±1.0 | 90.2* ±2.0 | 93.2 |
| Claude 3.5 Haiku | 64 | 80.3 | 90.9^M ±1.6 | 83.1 ±0.8 | 37.5^M ±4.5 | 69.4 ±1.3 | 93.8^M ±1.3 | 85.9* ±2.4 | 86.6 |
| *(prompting)* |  | 0-shot CoT | 25-shot | 3-shot | 0-shot CoT | 0-shot CoT | 0-shot CoT | 0-shot | 3-shot CoT |
| Gemini 1.5 Pro (002) | 58 | 85.9 | 95.4^M ±1.2 | 74.9 ±0.9 | 55.1^M ±4.6 | 86.5 ±0.9 | 90.8 ±1.6 | 91.7^M ±1.9 | 89.2 |
| Gemini 1.5 Flash (002) | 190 | 78.9 | 94.3^M ±1.3 | 78.4 ±0.8 | 45.1^M ±4.6 | 77.9 ±1.2 | 86.2 ±1.9 | 91.6^M ±1.9 | 85.5 |
| Gemini 1.5 Flash 8B (001) | 283 | 68.1 | 88.7^M ±1.8 | 68.1^M ±0.9 | 33.5^M ±4.4 | 58.7 ±1.4 | 84.5^M ±2.0 | 86.1^M ±2.3 | 69.5 |
| *(prompting)* |  | 5-shot | 25-shot | 3-shot | 0-shot | 4-shot | 11-shot | 0-shot | 3-shot |
| GPT-4o | 163 | 88.7 | 96.2^M ±1.1 | 83.4 ±0.7 | 48.4^M ±4.6 | 76.6 ±1.2 | 92.6^M ±1.4 | 89.8^M ±2.1 | 83.0^M |
| GPT-4o Mini | 113 | 82.0 | 92.3^M ±1.5 | 79.7 ±0.8 | 41.7^M ±4.6 | 70.2 ±1.3 | 86.4^M ±1.8 | 87.4^M ±2.3 | 81.0^M |
| *(prompting)* |  | 0-shot | 25-shot | 3-shot | 0-shot CoT | 0-shot CoT | 0-shot CoT | 0-shot | 3-shot |
| Llama 3.2 90B | 40 | 86.0 | 94.8 ±1.3 | - | 46.7 ±4.6 | 68.0 ±1.3 | 95.1 ±1.2 | 90.9^M ±2.0 | - |
| Llama 3.2 11B | 124 | 73.0 | 83.4 ±2.1 | - | 32.8 ±4.3 | 51.9 ±1.4 | 84.5 ±2.0 | 85.0^M ±2.4 | - |
| Llama 3.1 8B | 157 | 73.0 | 83.4 ±2.1 | - | 30.4 ±4.3 | 51.9 ±1.4 | 84.5 ±2.0 | 85.0^M ±2.4 | - |
| *(prompting)* |  | 0-shot CoT | 25-shot | - | 0-shot CoT | 0-shot CoT | 8-shot CoT | - | - |
Table 1: Quantitative results on core capability benchmarks (MMLU [36], ARC-C [22], DROP [26], GPQA [64], MATH [37], GSM8K [23], IFEval [89], and BigBench-Hard (BBH) [72]). Unless otherwise noted, all reference numbers are taken from the original technical reports and websites for Claude models [14, 11], GPT-4 models [58, 57], Llama models [45], and Gemini models [32]. Results marked with $^M$ were measured by us. Claude numbers for IFEval (taken from [14]) are marked with an asterisk (*), as the scoring methodology is unspecified in the report. Token generation speed in tokens per second (tok/sec), the inverse of per-token generation latency, is reproduced from Section 2.5.

Table 1 summarizes the quantitative results of Nova models and select public models on the aforementioned benchmarks for core capabilities. When available, we reference the highest publicly-reported numbers for each benchmark from the official technical reports and websites for the Claude, Gemini, OpenAI, and Llama families of models. Amazon Nova Pro, Lite, and Micro demonstrate strong performance across all benchmarks, showcasing their advanced core intelligence, particularly Amazon Nova Micro and Lite on math, reasoning, and instruction-following benchmarks.

We also evaluate the translation capabilities of Nova models. Flores200 [73, 34, 35], or simply Flores, is a machine translation benchmark consisting of translations from 842 distinct web articles, which tests translation between English and non-English languages. Sentences are 21 words long on average. We use a 0-shot setup and report the macro average of two metrics, spBleu and the COMET22 score [63], across a set of languages (Arabic, German, Spanish, French, Hindi, Italian, Japanese, Korean, Portuguese, Hebrew, Turkish, Simplified Chinese, Russian, Dutch) for translation from and into English. The prompts used for evaluation are summarized in Appendix B.1. Table 2 summarizes our quantitative results on Flores, demonstrating strong multilingual performance on translation for Amazon Nova Pro, Lite, and Micro.
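For reference, the following is a hedged sketch of how the reported spBleu numbers aggregate; it assumes a sacrebleu build that ships the "flores200" SentencePiece tokenizer, and the hypothesis/reference strings are placeholders rather than Flores data.

```python
import sacrebleu  # assumption: this build provides the "flores200" tokenizer

def spbleu(hypotheses, references):
    """BLEU computed on the Flores SentencePiece tokenization ("spBleu")."""
    return sacrebleu.corpus_bleu(hypotheses, [references], tokenize="flores200").score

per_language = {}
for lang in ["de", "es", "fr"]:  # a subset of Set1, for brevity
    hyps = ["placeholder model translation"]   # model outputs for en -> lang
    refs = ["placeholder reference sentence"]  # Flores reference translations
    per_language[lang] = spbleu(hyps, refs)

# The reported per-direction number is the uniform macro average across languages.
macro_spbleu = sum(per_language.values()) / len(per_language)
```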
| Model | tok/sec | en → Set1: spBleu (↑) | en → Set1: COMET22 (↑) | Set1 → en: spBleu (↑) | Set1 → en: COMET22 (↑) |
| --- | --- | --- | --- | --- | --- |
| Nova Pro | 100 | 43.4 | 89.1 | 44.4 | 89.0 |
| Nova Lite | 157 | 41.5 | 88.8 | 43.1 | 88.8 |
| Nova Micro | 210 | 40.2 | 88.5 | 42.6 | 88.7 |
| Claude 3.5 Sonnet (Oct) | 57 | 42.5^M | 89.4^M | 43.5^M | 89.1^M |
| Claude 3.5 Haiku | 64 | 40.0^M | 88.5^M | 40.2^M | 88.3^M |
| Gemini 1.5 Pro (002) | 57 | 43.0^M* | 89.1^M* | 45.6^M* | 89.1^M* |
| Gemini 1.5 Flash (002) | 190 | 40.0^M* | 88.5^M* | 42.9^M* | 88.8^M* |
| Gemini 1.5 Flash 8B (001) | 283 | 38.2^M* | 88.0^M* | 41.4^M* | 88.5^M* |
| GPT-4o | 163 | 43.1^M* | 89.2^M* | 43.9^M* | 89.0^M* |
| GPT-4o Mini | 113 | 41.1^M* | 88.7^M* | 41.9^M* | 88.7^M* |
| Llama 3.2 90B | 40 | 39.7^M | 88.2^M | 43.7^M | 88.5^M |
| Llama 3.2 11B | 124 | 33.0^M | 85.7^M | 36.3^M | 86.3^M |
| Llama 3.1 8B | 157 | 32.7^M | 85.5^M | 36.5^M | 86.5^M |
Table 2: Quantitative results on Flores200 [34], a machine translation benchmark, in the 0-shot setting. Set1 refers to {de, es, fr, it, pt, ja, ar, hi, ru, nl, tr, he, ko, zh}. Results marked with $^M$ were measured by us. Results marked with an asterisk (*) were obtained using an alternate prompt, which can be found in Appendix B.1. Token generation speed in tokens per second (tok/sec), the inverse of per-token generation latency, is reproduced from Section 2.5.

# 2.1.2 Core capability multimodal benchmarks and results

In this section we evaluate the multimodal capabilities of Amazon Nova models on a diverse set of public benchmarks. Our selection of multimodal benchmarks aims to probe for various capabilities, including natural image understanding, document understanding with charts and graphs, text understanding, and temporal reasoning in videos. For all benchmarks, we follow the suggested metrics and choice of data split for evaluation. The following list briefly describes the selected benchmarks.

- MMMU [85]: The Massive Multi-discipline Multimodal Understanding benchmark consists of college-level multiple-choice and open-ended questions from 30 different disciplines. We use Chain-of-Thought (CoT) prompting for this benchmark and report accuracy.
- ChartQA [50]: The 2,500 questions of this benchmark cover three different types of charts (bar, line, and pie) and require strong visual, logical, and arithmetical reasoning capabilities. We evaluate on the test set and report relaxed accuracy.
- DocVQA [51]: This benchmark probes capabilities on document analysis and recognition, including Optical Character Recognition (OCR). The 5,349 questions contain images from a diverse set of documents, ranging from 1940 to 2020 and covering multiple industries. We report Average Normalized Levenshtein Similarity (ANLS).
| Model | tok/sec | MMMU (CoT) val (accuracy) | ChartQA^C test (relaxed accuracy) | DocVQA test (ANLS) | TextVQA val (weighted accuracy) | VATEX test (CIDEr) | EgoSchema test (accuracy) |
| --- | --- | --- | --- | --- | --- | --- | --- |
| Amazon Nova Pro | 100 | 61.7 ±3.2 | 89.2 ±1.2 | 93.5 | 81.5 | 77.8 | 72.1 ±5.4 |
| Amazon Nova Lite | 157 | 56.2 ±3.2 | 86.8 ±1.3 | 92.4 | 80.2 | 77.8 | 71.4 ±5.4 |
| Claude 3.5 Sonnet (Oct) | 57 | 70.4 ±3.0 | 90.8 ±1.1 | 94.2 | 61.7^M | - | - |
| Claude 3 Haiku | 64 | 50.2 ±3.3 | 82.0 ±1.5 | 88.8 | - | - | - |
| Gemini 1.5 Pro (001) | 58 | 65.9 ±3.1^E | 87.2 ±1.3 | 93.1^B | 78.7 | 64.6^A | 72.2 ±5.4 |
| Gemini 1.5 Flash (001) | 190 | 62.3 ±3.2^E | 85.4 ±1.4 | 89.9^B | 78.7 | 57.1 | 65.7 ±5.7 |
| Gemini 1.5 Flash 8B (001) | 283 | 53.7 ±3.3^F | 78.2 ±1.6^G | 73.6 | 66.7 | 53.2^A | - |
| GPT-4o (May) | - | 69.1 ±3.0 | 85.7 ±1.4 | 92.8 | 77.2^DM | - | 72.2 ±5.4 |
| GPT-4o Mini (Jul) | 113 | 59.4 ±3.2 | 79.2 ±1.6^M | - | 70.3^M | - | - |
| Llama 3.2 90B | 40 | 60.3 ±3.2 | 85.5 ±1.4 | 90.1 | 80.7^M | - | - |
| Llama 3.2 11B | 124 | 50.7 ±3.3 | 83.4 ±1.5 | 88.4 | 71.3^M | - | - |
Table 3: Quantitative results on four image understanding benchmarks (MMMU [85], ChartQA [50], DocVQA [51], TextVQA [70]) and two video understanding benchmarks (VATEX [78] and EgoSchema [49]). Higher numbers are better for all benchmarks (↑). Unless otherwise noted, all evaluations are 0-shot and reference numbers are taken from the original technical reports and websites for Claude models [11, 12], GPT-4 models [56, 55], Llama models [45, 53], and Gemini models [32, 33]. Remarks: (A) 4-shot evaluation; (B) external Optical Character Recognition (OCR) was used; (C) all models except Amazon Nova use CoT; (D) GPT-4o (Nov); (E) Gemini 1.5 Flash/Pro (002) models; (F) reported in [33]; (G) reported in [4]; (M) Claude 3.5 Sonnet and Llama 3.2 results for TextVQA, as well as GPT-4o and GPT-4o mini results on ChartQA, TextVQA, and VATEX, were measured by us. Token generation speed in tokens per second (tok/sec), the inverse of per-token generation latency, is reproduced from Section 2.5.

- TextVQA [70]: The 5,000 samples of this dataset focus specifically on text-reading capabilities (OCR) in natural images. We report weighted accuracy on the validation set.
- VATEX [78]: This video captioning benchmark covers a diverse set of human activities. We evaluate on the public test set containing videos with a length of around 10 seconds. The CIDEr [75] score is used for evaluation.
- EgoSchema [49]: The unique characteristic of this long-form video question answering benchmark is its high "certificate length" [15], which is, loosely speaking, the time it takes a human to verify the video description. The videos cover a broad range of natural human activities and come with human-curated multiple-choice question-answer pairs.

Table 3 summarizes our quantitative results on multiple image and video understanding benchmarks. Amazon Nova Pro and Lite achieve high scores across all benchmarks. Chart understanding on ChartQA and video understanding on VATEX stand out, where Nova models rank either first or second. We provide the prompt templates for all benchmarks in Appendix B.2, as well as qualitative examples in Appendix C.

# 2.2 Agentic workflows

Amazon Nova Pro, Lite, and Micro models can be used as agents. An agent considers a suite of tools and APIs, reasons about the user's request and past conversational history, chooses if a tool should be used and, if so, decides which tool to use, invokes the tool, assesses the outcome from the tool, and then communicates back with the user [83, 67, 46, 60]; a minimal sketch of this loop follows below.

To this end, we evaluated our Nova models on agentic workflows that require textual understanding and visual reasoning. For textual understanding (Section 2.2.1), we used the Berkeley Function Calling Leaderboard benchmark to test our models' capabilities in function calling and orchestrating real-world applications. For visual reasoning (Section 2.2.2), we evaluate on three benchmarks that require image understanding capabilities for correct function calling. We highlight that both Amazon Nova Pro and Lite models set a new state of the art on these challenging benchmarks.
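The agentic loop referenced above can be sketched as follows; this is a minimal illustration with a stubbed model call and a toy tool registry, and the message format and names are ours, not the Amazon Bedrock API.

```python
import json

# Toy tool registry; the tool name and signature are illustrative only.
TOOLS = {"get_weather": lambda city: {"city": city, "temp_c": 21}}

def call_model(tools, history, request):
    """Stand-in for a real model invocation: it emits a canned tool call first,
    then a canned final answer once a tool result is present in the history."""
    if not any(m["role"] == "tool" for m in history):
        return {"type": "tool_call", "name": "get_weather", "arguments": {"city": "Seattle"}}
    return {"type": "final_answer", "text": "It is currently 21 C in Seattle."}

def run_agent(request: str) -> str:
    history = [{"role": "user", "content": request}]
    while True:
        # The model sees the available tools, the conversation so far, and the
        # request, then either answers directly or chooses a tool to invoke.
        decision = call_model(list(TOOLS), history, request)
        if decision["type"] == "final_answer":
            return decision["text"]
        result = TOOLS[decision["name"]](**decision["arguments"])         # invoke the tool
        history.append({"role": "tool", "content": json.dumps(result)})  # assessed next turn

print(run_agent("What's the weather in Seattle?"))
```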
# 2.2.1 Agentic text benchmarks and results

Table 4 presents quantitative results on the Berkeley Function Calling Leaderboard v3 (BFCL). Stemming from the Gorilla project [60], the revamped BFCL [81] benchmark evaluates a model's ability to accurately call and utilize real-world functions, or tools, based on a user's natural language request. Amazon Nova models particularly excel in the Abstract Syntax Tree (AST), Execution, and Relevance metrics, as well as in overall score relative to comparable models. Amazon Nova Lite and Micro also had the lowest latency of the selected models.

In Table 4, AST measures the exact match function calling performance of the model when comparing function names and argument/value signatures to a human-curated ground truth. While AST allows for some soft matching based on manually-defined, permitted argument values (e.g., different date formats), Execution measures a function call's accuracy not by the call signature itself, but by comparing the return value of the call when executed against a real API.

To measure the rate of hallucination, Irrelevance measures the model's ability to recognize that it does not have the appropriate functions available to help the user, and should therefore not call any. Relevance, as the opposite of irrelevance, measures the model's ability to recognize it indeed does have the functions necessary to help the user (but does not verify function signature accuracy). For both metrics, higher numbers are better.
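A minimal sketch of the AST-style matching just described; the ground-truth schema with permitted argument values is our illustration, not the BFCL harness itself.

```python
def ast_match(pred: dict, truth: dict) -> bool:
    """Exact match on function name and argument set; each predicted value must
    be one of the permitted values for that argument (e.g., alternate date formats)."""
    if pred["name"] != truth["name"]:
        return False
    if set(pred["args"]) != set(truth["args"]):
        return False
    return all(pred["args"][arg] in allowed for arg, allowed in truth["args"].items())

truth = {"name": "book_flight",
         "args": {"date": ["2024-11-17", "11/17/2024"], "dest": ["SEA"]}}
pred = {"name": "book_flight", "args": {"date": "11/17/2024", "dest": "SEA"}}
assert ast_match(pred, truth)  # soft match: the alternate date format is permitted
```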
| Model | Overall accuracy (↑) | Latency seconds (↓) | Non-Live AST (↑) | Non-Live execution (↑) | Live overall (↑) | Multi-Turn overall (↑) | Hallucination relevance (↑) | Hallucination irrelevance (↑) |
| --- | --- | --- | --- | --- | --- | --- | --- | --- |
| Nova Pro | 68.4 | 1.0 | 90.1 | 89.8 | 71.5 | 45.1 | 95.1 | 65.1 |
| Nova Lite | 66.6 | 0.6 | 87.5 | 86.4 | 66.0 | 50.3 | 97.6 | 49.1 |
| Nova Micro | 56.2 | 0.5 | 87.2 | 89.7 | 67.4 | 15.5 | 87.8 | 57.6 |
| Claude Sonnet 3.5 (Jun) | 61.3 | 3.9 | 70.0 | 66.3 | 74.7 | 40.0 | 68.3 | 74.6 |
| Claude Haiku 3 | 40.4 | 1.5 | 41.7 | 47.5 | 57.7 | 20.6 | 97.6 | 29.4 |
| Gemini 1.5 Pro (002) | 59.8 | 3.0 | 88.0 | 91.4 | 74.3 | 16.3 | 75.6 | 75.1 |
| Gemini 1.5 Flash (002) | 55.3 | 1.1 | 79.7 | 80.6 | 73.2 | 12.5 | 78.1 | 75.7 |
| Llama 3.2 90B^A | 54.3 | N/A | 88.9 | 89.3 | 61.1 | 14.3 | 92.7 | 58.4 |
| Llama 3.2 11B^A | 49.9 | N/A | 83.6 | 87.3 | 57.9 | 10.5 | 78.1 | 41.6 |
| GPT-4o (Aug) | 68.9 | 1.5 | 85.9 | 85.6 | 75.4 | 45.3 | 63.4 | 82.9 |
| GPT-4o-mini (Jul) | 60.7 | 1.6 | 84.3 | 84.1 | 70.2 | 28.3 | 80.5 | 71.8 |
Table 4: Results on the Berkeley Function Calling Leaderboard (BFCL) v3 as of the Nov 17th, 2024 update. We include the latest versions of the models available on the leaderboard at that time. (A) We use leaderboard results for Llama 3.1 8B and 70B for Llama 3.2 11B and 90B, respectively, given the shared text LLM.

# 2.2.2 Agentic multimodal benchmarks and results

The Amazon Nova Pro and Lite models provide native support for multimodal inputs, including for agentic workflows. In this section, we present results from our models on three different benchmarks that require agents to navigate websites to solve real-world tasks. Websites are typically represented as screenshots in these datasets to correctly convey all style elements and visual data as rendered in a standard web browser.

- VisualWebBench [43]: This benchmark includes seven core tasks related to web browsing, including captioning, question answering, OCR, action prediction, and grounding. All models are evaluated on 1,536 samples that span more than 100 websites from 12 domains. The final metric is the average over different metrics for the individual core tasks.
- MM-Mind2Web [86]: This extension of the original Mind2Web [24] benchmark links samples with the original website screenshots, making it multimodal. An agent needs to select an element and pick one of three elementary actions (click, type, or select) alongside a value for some actions. We report the micro average over the per-sample step accuracy, where an agent is successful only if element and action selection, as well as the predicted value, are correct.
- GroundUI-1K [87]: This benchmark is composed of multiple existing datasets, including Mind2Web [24], and repurposes them as a grounding task. On 1,000 evaluation samples, a multimodal agent is given an instruction and a screenshot of a website from a wide variety of domains and asked to predict the 2D location of the desired UI element. The agent is correct if its predicted 2D location is within the ground truth bounding box; a sketch of these scoring rules follows below.

Table 5 shows the results of our models on multimodal agent workflows along with other publicly-reported results. Both Amazon Nova models, Lite and Pro, demonstrate strong visual reasoning and agentic capabilities and achieve high scores on all three benchmarks.
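The two scoring rules above, sketched concretely; the record layouts and function names are ours, not the benchmarks' official evaluators.

```python
def groundui_correct(x: float, y: float, box: tuple) -> bool:
    """GroundUI-1K: a prediction counts when the predicted point lies inside
    the ground-truth bounding box (x0, y0, x1, y1)."""
    x0, y0, x1, y1 = box
    return x0 <= x <= x1 and y0 <= y <= y1

def mind2web_step_correct(pred: dict, gold: dict) -> bool:
    """MM-Mind2Web: a step counts only if element, action, and value all match."""
    return (pred["element"] == gold["element"]
            and pred["action"] == gold["action"]           # click / type / select
            and pred.get("value") == gold.get("value"))    # value only for some actions

print(groundui_correct(0.42, 0.13, (0.40, 0.10, 0.55, 0.18)))  # True
```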
| Model | VisualWebBench composite (D) | MM-Mind2Web step accuracy | GroundUI-1K accuracy |
| --- | --- | --- | --- |
| Nova Pro | 79.7 | 63.7 | 81.4 |
| Nova Lite | 77.7 | 60.7 | 80.2 |
| Claude 3.5 Sonnet (Oct) | 76.7 (M) | 61.6 (M) | 16.3 |
| GPT-4o (Nov) | 77.5 (M) | 55.0 (M) | 13.4 (C) |
| GPT-4o Mini (Jul) | 71.3 (M) | 58.6 (M) | 7.2 (M) |
| GPT-4 (Apr) | 64.6 | 36.8 (A) | - |
| Gemini 1.5 Pro (002) | 76.4 (M) | 58.4 (M) | 35.2 (B) |
| Gemini 1.5 Flash (002) | 76.1 (M) | 46.2 (M) | 59.9 (M) |
| Gemini 1.0 Pro (001) | 48.0 | 17.9 (A) | - |
| Llama 3.2 90B | 73.2 (M) | 21.6 (M) | 8.3 (M) |
| Llama 3.2 11B | 65.1 (M) | 22.1 (M) | 3.7 (M) |
Table 5: Quantitative results on three multimodal agentic benchmarks: VisualWebBench [43], MM-Mind2Web [86], and GroundUI-1K [87]. Reference numbers are taken from the corresponding benchmark papers [43, 86, 87] and leaderboard [3]. Remarks: (A) uses in-context learning (ICL) (note that Amazon Nova models do not need to rely on in-context examples); (B) Gemini 1.5 Pro (001); (C) GPT-4o (May); (D) macro average over the individual metrics; (M) measured by us.

# 2.3 Long context

We evaluate Amazon Nova Pro, Lite, and Micro on tasks that require the models to understand and reason over long context. These skills are crucial for tasks such as long multi-turn conversations, reasoning over long lists of retrieved documents, or understanding long videos. Amazon Nova Micro, Lite, and Pro support context lengths of 128k, 300k, and 300k tokens, respectively. We used the following benchmarks to evaluate our models' long-context performance:

- Text Needle-in-a-Haystack (NIAH): Following [40], we assessed each model's ability to locate specific information (the "needle") within extensive contexts (the "haystack"). This test evaluates the model's performance on context lengths starting at 32k, allowing us to measure its ability to accurately retrieve information across varying lengths of input context (a construction sketch follows Figure 2).
- SQuALITY [76] (ZeroScrolls benchmark [69]): Focused on query-based summarization of literary stories, this task evaluates the model's capacity to generate relevant summaries from large contexts.
- LVBench [77]: This multimodal benchmark includes questions about YouTube videos from various domains such as TV series, sports, broadcasts, and surveillance footage. The LVBench dataset consists of 99 videos and 1,549 questions, covering six types of tasks such as reasoning, event understanding, and summarization.

![](images/a575838c54ef1139078534f10109c9a2c9a9db02f73565857edd5970bcc3d3d6.jpg)

![](images/62810bb4a4f0c1a6ec5b253cc0bdfe5416772d4bd04d5e56463fb15a82e82c78.jpg)

![](images/0cbd85126bd687b64f349529061c5f9e6d085266463731bb8ec5d4319a9c86c7.jpg)
Figure 2: Text Needle-in-a-Haystack recall performance for Nova Micro (up to 128k), Nova Lite (up to 300k), and Nova Pro (up to 300k) models.
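To make the NIAH setup concrete, here is a minimal sketch of how such a test case can be constructed. It follows the spirit of [40] rather than its exact harness, and the filler, needle, and question strings are our own placeholders.

```python
# Build a needle-in-a-haystack prompt: a single "needle" fact is inserted at a
# chosen relative depth of a long distractor context, then the model is asked
# a question that can only be answered by retrieving the needle.
FILLER = "The quick brown fox jumps over the lazy dog. "  # stand-in distractor
NEEDLE = "The secret ingredient in the recipe is cardamom."
QUESTION = "What is the secret ingredient in the recipe?"

def build_niah_prompt(context_chars: int, depth: float) -> str:
    """depth in [0, 1]: 0 places the needle at the start, 1 at the end."""
    haystack = (FILLER * (context_chars // len(FILLER) + 1))[:context_chars]
    cut = int(len(haystack) * depth)
    context = haystack[:cut] + " " + NEEDLE + " " + haystack[cut:]
    return f"{context}\n\nQuestion: {QUESTION}\nAnswer:"

# Sweep context length and needle depth, as in the Figure 2 heatmaps; each
# prompt would be sent to the model and the answer checked for "cardamom".
prompts = [build_niah_prompt(n, d) for n in (32_000, 128_000) for d in (0.0, 0.5, 1.0)]
```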
| Model | SQuALITY ROUGE-L | LVBench accuracy |
| --- | --- | --- |
| Nova Pro | 19.8 ±8.7 | 41.6 ±2.5 |
| Nova Lite | 19.2 ±8.6 | 40.4 ±2.4 |
| Nova Micro | 18.8 ±8.6 | - |
| Claude 3.5 Sonnet (Jun) | 13.4 ±7.5 | - |
| Gemini 1.5 Pro (001) | - | 33.1 ±2.3 |
| Gemini 1.5 Pro (002) | 19.1 ±8.6 (M) | - |
| Gemini 1.5 Flash (002) | 18.1 ±8.4 (M) | - |
| GPT-4o | 18.8 ±8.6 | 30.8 ±2.3 |
| Llama 3 - 70B | 16.4 ±8.1 | - |
| Llama 3 - 8B | 15.3 ±7.9 | - |
Table 6: Text and multimodal long-context performance on SQuALITY (ROUGE-L) and LVBench (accuracy). For SQuALITY, measurements for Claude 3.5 Sonnet, GPT-4o, Llama 3 70B, and Llama 3 8B are taken from the Llama 3 report [45]; Gemini results were measured by us (M). For LVBench, Gemini and GPT-4o numbers were taken from the corresponding benchmark leaderboard [77].

Results for the text and multimodal long-context benchmarks are presented in Table 6. In the long video question answering task, both Amazon Nova Pro and Lite demonstrate robust performance on the LVBench dataset, surpassing the other models. Amazon Nova models consistently retrieve information from any depth across both text and multimodal understanding use cases, delivering high accuracy and reliability.

# 2.4 Functional expertise

In addition to core capabilities, foundation models must perform well in particular specialties and domains. Across our many areas of performance analysis, we have selected three domains for which to present benchmarking results: software engineering, financial analysis, and retrieval-augmented generation. Prompt templates for all benchmarks can be found in Appendix B.3.
| Model | tok/sec | Software: HumanEval Python (0-shot pass@1) | Finance: FinQA (0-shot accuracy) | RAG: CRAG (accuracy) |
| --- | --- | --- | --- | --- |
| Nova Pro | 100 | 89.0 ±4.8 | 77.2 ±0.9 | 50.3 ±1.9 |
| Nova Lite | 157 | 85.4 ±5.4 | 73.6 ±0.9 | 43.8 ±1.9 |
| Nova Micro | 210 | 81.1 ±6.0 | 65.2 ±1.0 | 43.1 ±1.9 |
| Claude 3.5 Sonnet (Oct) | 57 | 93.7 ±3.7 | 77.3 ±0.9 (M) | 52.6 ±1.8 (M) |
| Claude 3.5 Haiku | 64 | 88.1 ±5.0 | 73.9 ±0.9 (M) | 31.9 ±1.8 (M) |
| Gemini 1.5 Pro (002) | 58 | 87.8 ±5.0 (M) | 74.4 ±0.9 (M) | 48.9 ±1.9 (M) |
| Gemini 1.5 Flash (002) | 190 | 81.1 ±6.0 (M) | 73.5 ±1.0 (M) | 42.4 ±1.9 (M) |
| Gemini 1.5 Flash 8B (001) | 283 | 81.1 ±6.0 (M) | 63.7 ±1.0 (M) | 37.7 ±1.8 (M) |
| GPT-4o | 163 | 90.2 ±4.6 | 71.1 ±1.0 (M) | 52.0 ±1.9 (M) |
| GPT-4o Mini | 113 | 87.2 ±5.1 | 70.6 ±1.0 (M) | 49.9 ±1.9 (M) |
| Llama 3.2 90B | 40 | 80.5 ±6.1 | 72.8 ±1.0 (M) | 45.2 ±1.9 (M) |
| Llama 3.2 11B | 124 | 72.6 ±6.8 | 60.8 ±1.1 (M) | 42.2 ±1.9 (M) |
| Llama 3.1 8B | 157 | 72.6 ±6.8 | 61.2 ±1.0 (M) | 42.2 ±1.8 (M) |
Table 7: Performance on select functional benchmarks, covering software engineering in Python with HumanEval [19], financial reasoning with FinQA [20], and retrieval-augmented generation with CRAG [82]. CRAG uses our scoring method described in Section 2.4.3. Where available, reference numbers are taken from the corresponding benchmark papers and technical reports [13, 11, 32, 39, 45, 58]; additional results were measured by us (M). Model speed in tokens per second (tok/sec) is reproduced from Section 2.5.

# 2.4.1 Software engineering

We assessed Amazon Nova's code generation capabilities on the Python coding task HumanEval [19]. The benchmark contains 164 original programming problems with unit tests. These problems assess language comprehension, algorithms, and simple mathematics; some are comparable to simple software interview questions. Table 7 provides the performance of our Nova models and select public models.

# 2.4.2 Financial analysis

We use FinQA [20] to evaluate Amazon Nova's ability to understand financial data. FinQA is an expert-annotated dataset comprising 8,281 financial question-answer pairs derived from the earnings reports of S&P 500 companies. It evaluates a model's ability to extract information from both tables and unstructured text while accurately performing calculations using relevant financial knowledge. We report the average post-rounding accuracy under the 0-shot CoT setting. Table 7 provides the performance of Amazon Nova models and select public models on FinQA.

# 2.4.3 Retrieval augmented generation

We evaluate RAG capabilities on the CRAG [82] benchmark using the Task 1 setup, which provides five pre-selected HTML pages as external knowledge for each input question. We extract the top 20 text snippets from these pages following the standard retrieval approach used in CRAG's official repository: pages are first cleaned using BeautifulSoup to remove HTML tags, after which the text is split into sentences or chunks no longer than 1,000 characters. These are then encoded using the sentence-transformers/all-MiniLM-L6-v2 model, which is also used to encode the question. The 20 chunks with the highest similarity are passed as context in the input for model inference. We report the percentage of correct responses as judged by an LLM (gpt-4-turbo-2024-04-09), which compares each model's answer with the expected answer using the prompt shown in Appendix B.3.2. Table 7 provides the performance of Amazon Nova models and selected public models on a combined validation and test set of 2,706 examples.

# 2.5 Runtime performance

We evaluate the runtime performance of Amazon Nova models using three metrics: Time to First Token (TTFT), Output Tokens per Second (OTPS), and Total Response Time. TTFT is measured as the time, in seconds, it takes to receive the first token from the model after an API request is sent. OTPS is the number of tokens generated per second (tok/sec): the rate at which a model produces subsequent output tokens after the first token, reflecting overall throughput and efficiency during inference. Total Response Time measures the total duration, in seconds, from the submission of the input prompt to the end of the generation sequence for a given input/output prompt length; it represents the overall user experience for a model.
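All three metrics can be measured client-side against any streaming endpoint. The sketch below is our own illustration; `stream_tokens` is a hypothetical stand-in for whichever SDK's streaming call is used.

```python
import time

def measure_runtime(stream_tokens, prompt: str) -> dict:
    """Measure TTFT, OTPS, and Total Response Time for a single request.
    `stream_tokens(prompt)` is a hypothetical generator yielding output tokens."""
    t_start = time.perf_counter()
    t_first = None
    n_tokens = 0
    for _ in stream_tokens(prompt):
        if t_first is None:
            t_first = time.perf_counter()  # first token received -> TTFT
        n_tokens += 1
    t_end = time.perf_counter()
    return {
        "ttft_s": (t_first - t_start) if t_first is not None else float("nan"),
        # OTPS counts tokens produced after the first token.
        "otps_tok_per_s": (n_tokens - 1) / (t_end - t_first) if n_tokens > 1 else 0.0,
        "total_response_time_s": t_end - t_start,
    }
```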
In Figure 3, we show TTFT, OTPS, and Total Response Time using 1,000 tokens of input and 100 tokens of output for Amazon Nova models and select public models, as reported by Artificial Analysis, an independent entity that benchmarks AI models and hosting providers. Amazon Nova Micro, Lite, and Pro are among the fastest models in their respective intelligence tiers. Together, all three Amazon Nova models demonstrate state-of-the-art runtime performance, ensuring a smooth and responsive user experience in many real-world use cases.

![](images/e774b73fa1e735c2e5327408e7138bc5a659ab582e779f6af3b14f94864a0daa.jpg)

![](images/f66f29a61daabbd25d85f8a0f81690ced064d2fb9861e120cc24a4a32227f13c.jpg)

![](images/4754f9755877defa3dbe1fd9cd96eb21940d26e1151935c60631550c46e02dc5.jpg)
Figure 3: Time to First Token (↓), Output Tokens per Second (↑), and Total Response Time (↓) using 1,000 tokens of input and 100 tokens of output for Amazon Nova models and select publicly-available models (Artificial Analysis, Nov 29th, 2024).

# 3 Amazon Nova Canvas Evaluation

Amazon Nova Canvas is a diffusion model that takes a text prompt and an optional RGB image as input and generates an image conditioned on the input text and optional image. Illustrative examples of the images generated by Amazon Nova Canvas can be found in our Amazon Science blog post. In this section, we provide details on the evaluation strategy and the performance of the model in terms of both automated metrics and human evaluation.

# 3.1 Automated metrics

We use ImageReward [80] and Text-to-Image Faithfulness (TIFA) [38] as automated metrics.

- The ImageReward score is produced by a standardized reward model that aligns the predicted score with human preference. To compute the ImageReward score, we randomly sample 10k prompts from the MSCOCO 2014 [42] validation set and use this set for calculating the score.
- The Text-to-Image Faithfulness (TIFA) score is a reference-free metric that measures the faithfulness of a generated image to the input text via visual question answering (VQA). The evaluation set for the TIFA score is a pre-selected set of 4k prompts in the TIFA v1.0 benchmark, sampled from the MSCOCO captions [42], DrawBench [66], PartiPrompts [84], and PaintSkill [21] datasets.

We compare Amazon Nova Canvas with other publicly-available models, including DALL.E 3 [16], Stable Diffusion 3 Medium [27], Stable Diffusion 3.5 Large [28], and Flux (Schnell and Pro) [17]. The results are shown in Table 8.
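For reference, ImageReward scores can be computed with the open-source reward model released with [80]. The sketch below assumes the `image-reward` pip package and the `RM.load`/`model.score` interface from its README (verify against the installed version); the file paths and prompts are hypothetical.

```python
# Scoring generated images with the ImageReward reward model [80].
# pip install image-reward  (interface per the package README; verify, as the
# API may differ between versions)
import ImageReward as RM

model = RM.load("ImageReward-v1.0")  # downloads the reward-model checkpoint

# Hypothetical (prompt, generated image) pairs from the evaluation set.
eval_pairs = [
    ("a dinosaur sitting in a tea cup", "outputs/dino.png"),
    ("a wooden boat in summer", "outputs/boat.png"),
]
rewards = [model.score(prompt, image_path) for prompt, image_path in eval_pairs]
print("mean ImageReward:", sum(rewards) / len(rewards))  # higher is better
```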
| Model | TIFA | ImageReward |
| --- | --- | --- |
| Amazon Nova Canvas | 0.897 | 1.250 |
| DALL.E 3 | 0.863 | 1.052 |
| Stable Diffusion 3.5 Large | 0.891 | 1.082 |
| Stable Diffusion 3 Medium | 0.881 | 0.952 |
| Flux Pro 1.0 | 0.875 | 1.075 |
| Flux Schnell | 0.882 | 0.999 |
Table 8: Comparison of TIFA and ImageReward metrics for Amazon Nova Canvas and other models.

# 3.2 Human evaluation

We conduct A/B testing to compare Amazon Nova Canvas with other third-party text-to-image models. The A/B testing prompt set comprises approximately 1,000 prompts designed to capture customer usage of text-to-image models. It includes prompts from datasets such as MSCOCO [42], DrawBench [66], OpenParti [84], DALL.E 3 Eval [16], and DOCCI [54], and covers a broad set of categories such as humans, landscapes, natural scenarios, indoor environments, creative themes, and artistic themes. A few prompts were randomly selected and repeated in order to obtain additional data points on the quality of the model.

With each prompt, we generate an image from Amazon Nova Canvas as well as from each other text-to-image model. We used random seeds to generate the images from Amazon Nova Canvas, and all images were generated at 1k × 1k resolution. If a prompt triggers filters such that an image is not generated, for either the Amazon Nova Canvas model or the public text-to-image model, we drop that prompt and do not show it to the human raters. All human evaluation is done in a single-blind manner: the annotator is provided two sets of images, one from Amazon Nova Canvas and the other from the third-party model, and the order of the images is randomized for each prompt and annotator. In our blind testing, we ask human annotators to select the images they prefer based on (1) text-image alignment, which measures the instruction-following capability of the model, and (2) image quality, which quantifies the overall preference of the annotators. To ensure rigorous, consistent, and unbiased evaluation, we used a third-party vendor for human evaluation, and we created guidelines used to train the annotators so that the decision-making criteria were clear to them in each dimension.

The pairwise results comparing Amazon Nova Canvas with OpenAI DALL.E 3 and Google Imagen 3 are shown in Table 9 as win, tie, and loss rates. The win rate reflects the percentage of samples where Amazon Nova Canvas was preferred over the other model, while the tie rate indicates the cases where the human annotator did not perceive a difference between the two models. As the results show, Amazon Nova Canvas has a higher win rate than the other text-to-image models.
| Nova Canvas versus: | DALL.E 3 win rate | DALL.E 3 tie rate | DALL.E 3 loss rate | Imagen 3 win rate | Imagen 3 tie rate | Imagen 3 loss rate |
| --- | --- | --- | --- | --- | --- | --- |
| Overall preference (image quality) | 54.5 | 6.4 | 39.1 | 48.2 | 5.3 | 46.5 |
| Instruction following (text-image alignment) | 39.4 | 22.5 | 38.1 | 38.4 | 28.1 | 33.5 |
Table 9: The win, tie, and loss rates (%) from human evaluation of Amazon Nova Canvas versus (a) DALL.E 3 and (b) Imagen 3.

# 4 Amazon Nova Reel Evaluation

Amazon Nova Reel is a diffusion model that takes a text prompt and an optional RGB image as input and generates a video conditioned on the input text and optional image. Illustrative examples of the videos generated by Amazon Nova Reel can be found in our Amazon Science blog post. In this section, we provide details on the evaluation strategy and performance of the model.

# 4.1 Human evaluation metrics

To evaluate Amazon Nova Reel, we rely on human feedback to assess the generated videos across two primary axes: video quality and video consistency. All evaluations are conducted through single-blind pairwise comparisons. Human annotators are shown two videos side by side and asked to choose the better video, or to mark them as equal if they find the videos equally performant on the metric being evaluated. All videos were generated in 720p resolution, and different random seeds were used during generation.

The video quality axis encapsulates the technical and perceptual aspects of the generated video via four primary components:

- Image quality: The visual appeal of individual frames, including resolution, sharpness, object clarity, and overall composition, such that each frame is visually pleasing and artifact-free.
- Motion quality: The fluidity of movement across frames, including motion consistency and smooth transitions without flickering, distortion, or abrupt shifts, contributing to natural and realistic motion portrayal.
- Image-text alignment: How closely individual frames match the prompt, considering the presence of described entities, their attributes, spatial relationships, colors, and other static visual details.
- Motion-text alignment: The accuracy of dynamic elements, including the correctness of actions performed by entities, camera movements, and temporal changes in attributes, as well as adherence to the provided description.

The video quality axis additionally includes factors influencing overall appeal, such as motion degree, entity size, creative composition, and general video likability.

The video consistency axis encapsulates the temporal coherence of both subjects and backgrounds throughout the video. It includes assessments of the maintenance of entity size, shape, and appearance, as well as background stability without unexpected morphing or changes. A high score in this dimension means believable spatial relationships between foreground and background elements throughout the video duration.

In combination, the video quality and video consistency metrics provide a holistic and robust evaluation framework for video generation models, considering both technical accuracy and perceptual appeal.

# 4.2 Dataset

We curated a diverse set of prompts designed to capture various aspects of video generation. The prompts are distributed across six broad categories: humans and activities, animals, natural scenery and landscapes, indoor scenes, object interactions, and creative scenes and activities. This broad categorization ensures that the evaluation covers a wide range of real-world scenarios. We structured the prompt set to cover various motion-related aspects, which is critical for assessing motion-text alignment in the generated videos.
For example, we included prompts with a variety of camera motions to evaluate how well the models follow instructions related to camera movement. Additionally, we incorporated dynamic attributes [71], in which the subject or background undergoes state or shape changes over time, allowing us to evaluate the model's ability to generate evolving entities. Finally, we added prompts that require motion binding [71], where specific compositions of movements and actions are requested, enabling us to assess how well models can generate complex, coordinated motions. The curated prompt set consists of approximately 700 prompts, all drawn from various open-source benchmarks.

# 4.3 Implementation details & results

To ensure a rigorous, consistent, and unbiased evaluation process, we outsourced the annotation collection to a third-party vendor. We created detailed guidelines in which annotators were given comprehensive instructions and examples for each evaluation dimension, ensuring clarity on the criteria for marking preferences between videos. These guidelines included examples of different scenarios to aid decision-making across our evaluation axes. We also ensured that annotators were trained using expert-provided examples, with each round of annotations subject to spot checks: 5-10% of the data from each batch was randomly selected and reviewed by expert annotators. Based on this feedback, the vendor continuously refined the annotators' understanding and accuracy, ensuring a high standard of evaluation across the board. To further enhance the reliability of the results, we employed a consensus voting system: for each video comparison, annotations were collected from three different evaluators, and a majority vote determined the final outcome (a minimal sketch follows). This helps reduce individual biases and ensures that the final assessments are based on collective judgment, increasing the robustness of the evaluation.

For reporting performance, we conducted pairwise comparisons between Amazon Nova Reel and other state-of-the-art models, including Gen3 Alpha [65] by Runway ML and Luma 1.6 [47] by Luma Labs. We report results in terms of win, tie, and loss rates. The win rate reflects the percentage of samples where Amazon Nova Reel was preferred over the other model, while the tie rate indicates cases where the evaluators found no perceptible difference between the two models. Using the curated prompt set described earlier, we evaluate the models across all the dimensions outlined above and report the results in Table 10.
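Below is a minimal sketch of the consensus scoring just described. It is our own illustration; in particular, treating a three-way split as a tie is an assumption, since three annotators with three labels need not produce a majority.

```python
# Reduce three per-annotator labels to one outcome by majority vote, then
# tally win/tie/loss rates for the candidate model.
from collections import Counter

def majority(votes: list[str]) -> str:
    """votes are per-annotator labels: 'win', 'tie', or 'loss'."""
    label, count = Counter(votes).most_common(1)[0]
    return label if count >= 2 else "tie"  # assumption: a 1-1-1 split -> tie

comparisons = [["win", "win", "tie"], ["loss", "win", "loss"], ["tie", "tie", "win"]]
outcomes = Counter(majority(v) for v in comparisons)
total = sum(outcomes.values())
for label in ("win", "tie", "loss"):
    print(f"{label} rate: {100 * outcomes[label] / total:.1f}%")
```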
| Nova Reel versus: | Runway Gen3 Alpha win rate | Runway Gen3 Alpha tie rate | Runway Gen3 Alpha loss rate | Luma 1.6 win rate | Luma 1.6 tie rate | Luma 1.6 loss rate |
| --- | --- | --- | --- | --- | --- | --- |
| Video Quality | 56.4 | 9.9 | 33.7 | 51.1 | 3.4 | 45.5 |
| Video Consistency | 67.0 | 9.1 | 23.9 | 74.7 | 5.1 | 20.2 |
Table 10: The win, tie, and loss rates (%) from human evaluation of Amazon Nova Reel versus (a) Gen3 Alpha and (b) Luma 1.6.

In video consistency, Amazon Nova Reel achieved win rates of 67.0% against Gen3 Alpha and 74.7% against Luma 1.6, demonstrating superior subject and background coherence. For video quality, Amazon Nova Reel secured win rates of 56.4% against Gen3 Alpha and 51.1% against Luma 1.6.

# 5 Responsible AI

Our approach to Responsible AI (RAI) is structured around the eight foundational dimensions [10] shown in Table 11. These dimensions guide our approach to RAI for the Amazon Nova family of models, which we articulate in the following three sections: (1) defining our RAI design objectives, (2) our actions to ensure adherence to these objectives, and (3) system evaluation and red teaming. The last two components form a continuous loop of model development and human/automated verification to ensure that our Amazon Nova models are aligned with our RAI objectives and deliver an exceptional and delightful customer experience.

# 5.1 Defining our RAI objectives

We operationalize our RAI dimensions into a series of detailed design objectives that guide our decision-making throughout the entire model development lifecycle, from initial data collection and pre-training to the implementation of post-deployment runtime mitigations.
| Term | Definition |
| --- | --- |
| Fairness | Considering impacts on different groups of stakeholders |
| Explainability | Understanding and evaluating system outputs |
| Privacy and security | Appropriately obtaining, using, and protecting data and models |
| Safety | Preventing harmful system output and misuse |
| Controllability | Having mechanisms to monitor and steer AI system behavior |
| Veracity and robustness | Achieving correct system outputs, even with unexpected or adversarial inputs |
| Governance | Incorporating best practices into the AI supply chain, including providers and deployers |
| Transparency | Enabling stakeholders to make informed choices about their engagement with an AI system |
Table 11: Our eight core Responsible AI dimensions.

In addition to being grounded in the RAI dimensions, our objectives are informed by relevant laws and regulations, voluntary frameworks, and our commitments to our customers, and they undergo an internal alignment process that includes reviews from a number of stakeholders. We will continue to iterate on these objectives as we engage with external experts and participate in industry and government forums, including the Frontier Model Forum [29], the Partnership on AI [5], and various forums organized by government agencies such as the National Institute of Standards and Technology (NIST) of the U.S. Department of Commerce [7].

Our commitment to Responsible Scaling: As the capabilities of AI models increase (through increased training data, model size, or architecture innovations), so do the potential risks that they present. We joined other technology companies in signing on to the White House's voluntary commitments on the safe, secure, and transparent development and use of foundation models [6]. Since then, we have actively participated in other efforts, including the AI Safety Summits in the UK and Seoul, and we have committed to new standards like the G7 AI Hiroshima Process Code of Conduct [30], in accordance with our commitment to the US White House on ensuring Safe, Secure, and Trustworthy Development and Use of Artificial Intelligence. We also started a partnership with the Model Evaluation and Threat Research (METR) center to enrich our Controllability design objectives.

# 5.2 Ensuring adherence to RAI objectives

We employed a number of methods to measure and ensure compliance with each of our core RAI dimensions, depending on their scope (i.e., whether they apply to model output, data management, or other processes). For the dimensions that govern model behavior (Safety, Fairness, Veracity and Robustness, Controllability, and Privacy and Security), we curated the pre-training data and used both Supervised Fine-Tuning (SFT) and Reinforcement Learning from Human Feedback (RLHF) to align our models. Based on the objectives for each RAI dimension, we created single- and multi-turn RAI demonstrations in multiple languages and conducted helpfulness/harmfulness studies to decide on SFT data mixes. We collected human preference data to be used as input to RLHF training, where we also provided an RAI-specific reward model. We also identify risk areas during our offline evaluations or red-teaming exercises (Section 5.4) and collect semantically similar examples to be included in future SFT and RLHF rounds.

In addition to the RAI model alignment, we built runtime input and output moderation models that serve as a first and last line of defense and allow us to respond more quickly to newly identified threats or gaps in model alignment. The main role of the input moderation model is to detect prompts that contain malicious, insecure, or illegal material, or that attempt to bypass the core model alignment (prompt injection, jailbreaking). Similarly, the output moderation model ensures that generated content adheres to our RAI objectives.

We have a rigorous Governance methodology: we develop our models in a working-backwards product process that incorporates RAI at the design phase, includes design consultations and implementation assessments by dedicated RAI science and data experts, and involves routine testing, reviews with customers, best-practice development, dissemination, and training.
We work to ensure that our Privacy and Security objectives are adhered to for both the model and the training data. In addition to the model output alignment described above, we take measures that include data access controls [9] protecting our model training data, the resulting weights, and model versions, as well as watermarking model outputs (see below). We address data privacy through several layers of defense, including de-identifying or removing certain types of personal data from our training data where feasible, as well as evaluation through red-teaming exercises that cover data privacy assessments.

For Explainability of our models' outputs, we conduct and draw on active research in Explainable AI to deeply understand our models' current behavior and their potential future behavior, and to build capabilities to continuously correct their behavior as and when necessary. We use various explainable AI methods throughout our model development to guide our decisions regarding RAI alignment and other mitigations. Services like Clarify [8] also enable our downstream developers to easily explain model predictions.

To ensure our models' Robustness against adversarial inputs, such as those that attempt to bypass alignment guardrails, we focused on risks applicable both to developers building applications using our models and to users interacting with our models via those applications. We organized those risks into broad categories such as sensitive data exfiltration, execution of unauthorized actions, degradation of runtime model service availability, and malicious content generation. We used this risk organization to build model resiliency against interactions that lead to the prioritized risks.

Finally, to maximize Transparency, we incorporate an invisible watermark during the image or video generation process and add C2PA metadata to all Canvas-generated content. We enhanced the watermark's robustness to alterations like rotation, resizing, color inversion, and flipping. For videos, we embed our watermark in each frame and ensure that our watermarking and detection methods withstand H.264 compression. To enable anyone to easily detect the watermarks in Amazon Nova generated content, an API will be available soon after launch. Our watermark detection system introduces several enhancements, such as making confidence-score-based predictions instead of a single binary prediction, reflecting the extent to which the generated content has been edited even when external tools were used. The new detection system covers both images and videos.

# 5.3 RAI Evaluation

Throughout model development we perform extensive RAI evaluations using publicly available benchmarks like BOLD [25], RealToxicityPrompts [31], and MM-SafetyBench [44]. We also built a series of proprietary, dynamically updated benchmarks. To build them, our internal data annotation team created a diverse set of examples for each of our RAI dimensions. In addition, we leveraged subject-matter experts in specific areas, such as Security and Controllability, to collect adversarial prompts. We continued updating and enhancing each dataset based on evaluation and red-teaming results (see Section 5.4 for more details on red teaming). This kept the internal benchmarks evergreen, avoiding overfitting during development, while also ensuring that the models do not regress against previously identified risks.
Our datasets comprise inputs in multiple languages and multiple modalities, and contain single-turn and multi-turn conversation examples.

# 5.4 Red Teaming

Static benchmarks give us a view of how well models perform, per RAI dimension, against a user's "plain" intent (i.e., prompts that explicitly state the user's intent to generate prohibited content). To test our models' resilience against techniques that mask the user's intent, we rely on red teaming. We employed a multi-pronged evaluation strategy consisting of internal red teaming, red teaming with third-party and subject-matter experts, and automated red teaming.

# 5.4.1 Internal Red Teaming

We used a team of trained data analysts and subject-matter experts to perform regular red-teaming exercises evaluating the model's robustness against adversarial prompts across all our RAI dimensions. We enhanced the diversity of manually curated adversarial prompts by employing linguistic, structural, and modality-based prompt mutation techniques, assessing each mutation for its effectiveness at eliciting a response that does not adhere to our RAI objectives, its likelihood of success, and the technique's novelty for a given model revision. In total, we identified and developed over 300 distinct techniques (see Figure 4) and tested them individually and chained in various combinations. The attacks covered multiple languages and modalities, targeting each language/modality individually and in combination. We designed cross-modality attacks, such as embedding adversarial content within seemingly benign visual inputs, to evaluate the models' ability to handle complex scenarios involving multiple input types. Where appropriate, we implemented automation to further improve the diversity, reliability, and efficiency of red teaming.

![](images/744fe999753b87a0d695ae52fa9855a52df774d240e85120dba86e378b9958a8.jpg)
Figure 4: Broad taxonomy and count of attack techniques we use for our red-teaming exercises

After each round of red teaming, we gathered feedback from the team regarding failure patterns, which guided the next stage of model development.

# 5.4.2 External Red Teaming

In accordance with our commitment to the US White House on ensuring Safe, Secure, and Trustworthy Artificial Intelligence, we partner with a variety of third parties to conduct red teaming against our AI models. These initiatives are in addition to our extensive in-house efforts, which include all aspects of cybersecurity red teaming. Just as with our internal red-teaming efforts, we iterated during model development based on feedback from these institutions to improve the RAI adherence of our models. We leverage red-teaming firms, including ActiveFence, to conduct testing in areas such as hate speech, political misinformation, extremism, and other RAI dimensions. We also work with specialized third parties to red team our models for Chemical, Biological, Radiological, and Nuclear (CBRN) capabilities. Our work with Deloitte Consulting tests our AI models' capabilities in biological risks and harms. Our work with Nemesys Insights LLC tests our AI models' capabilities in the radiological and nuclear domains. We also work with the Gomes Group at Carnegie Mellon University to test our models' capabilities in chemistry and chemical compounds.
Each of these partners was carefully selected based on their industry leadership, previous or parallel red-teaming work with other AI model developers, and their contributions to evolving government and industry standards around CBRN and overall AI safety. We provide a brief summary of each vendor's expertise and testing methodology below.

ActiveFence: ActiveFence is a team of over 150 subject-matter experts providing AI safety and content moderation solutions. The team produced over 9,700 adversarial prompts, distributed over 20 categories, including content-targeted red teaming (evaluating the model's ability to generate harmful or inappropriate content) and security-targeted red teaming (assessing the model's resilience against malicious attempts to manipulate its behavior or extract sensitive information).

Deloitte: The evaluation team at Deloitte Consulting LLP (formerly known as Gryphon Scientific) has unique experience at the intersection of artificial intelligence and biology. The primary thrust of this effort involved evaluating the model against a panel of 30 questions developed to test an LLM's scientific knowledge and reasoning capabilities that could facilitate the development or use of biological weapons. The model's responses to these questions were evaluated for their scientific accuracy and their utility to someone seeking to do harm with biology. After completing the initial evaluations, the Deloitte team probed more deeply into the questions to which the LLM had originally replied with potentially concerning information.

Gomes Group: The Gomes Group at Carnegie Mellon University is at the forefront of integrating advanced artificial intelligence into chemical research. Their evaluation framework consisted of both automated and non-automated assessments. Two non-automated evaluations explored aggregation-attack vulnerabilities through purchasing and remote chemical mixing scenarios. The automated evaluations utilized two distinct datasets: one containing 39 hazardous chemicals (including DEA Schedule I and II substances and chemical warfare agents) and another with 362 common chemicals for NFPA diamond classifications. Three primary automated evaluations were conducted using the hazardous chemicals dataset. The NFPA diamond evaluation comprised 1,810 prompts, testing both single-turn and multi-turn approaches, with consistent accuracy across both methods.

Nemesys: Nemesys Insights LLC runs uplift studies, red-teaming exercises, and risk assessments for a variety of technology companies and third-party research entities to assess national-security-related risks of large language models and other generative AI tools. For their testing, they started with human red-teaming exercises focused on non-state acquisition or use of illicit radiological/nuclear (RN) materials, followed by prompt-response evaluation and uplift studies. The exercises comprised two different scenarios (a: violent non-state actor acquisition and use of Cobalt-60; b: non-state actor acquisition and international transport of HEU, highly enriched uranium), and utilized 8 subject-matter experts with operational and technological knowledge in a 2-team x 2-scenario design to construct and refine threat plans across a 6-hour planning cycle.

# 5.4.3 Automated Red Teaming

Finally, to augment human-based red teaming, we built an automated red-teaming mechanism by adapting our FLIRT (Feedback Loop In-context Red Teaming) [52] framework.
This approach helped us scale and repeat red teaming efficiently. FLIRT uses a list of seed prompts that have been identified by human evaluators as potentially violating one or more of our RAI dimensions. For every dimension, a subset of seeds is used to generate additional prompts with a dedicated language model, called red-LM, through in-context learning (ICL) [18] and a carefully crafted set of instructions. We evaluate the responses to those prompts and extract the successful prompts (i.e., the ones triggering a prohibited response) for the next round of generation. These steps are repeated for a chosen number of iterations across all RAI categories. We use our automated red-teaming mechanism to evaluate both RAI adherence robustness and false refusals, and to generate adversarial tests across multi-turn interactions, multiple languages, and multiple input/output modalities, uncovering and correcting robustness issues in our models due to potential adversarial content in such interactions and inputs.

# 6 Training Infrastructure

The Nova family of models was trained on Amazon's custom Trainium1 (TRN1) chips and on NVIDIA A100 (P4d instances) and H100 (P5 instances) accelerators. Working with AWS SageMaker, we stood up NVIDIA GPU and TRN1 clusters and ran parallel trainings to ensure model performance parity, while optimizing training throughput on the different stacks. All clusters utilize a petabit-scale non-blocking EFA network fabric, which is less prone to packet loss than other network transport protocols and provides the highest network bandwidth with H100 accelerators of any instance type available on AWS EC2. We conducted distributed training on AWS SageMaker-managed Elastic Kubernetes Service (EKS) clusters, and utilized Amazon FSx and Amazon Simple Storage Service (S3) for data and checkpoint IO. While FSx offers performant and convenient storage for large-scale training jobs, S3 allowed cost-efficient scaling to large multimodal datasets and model checkpoints.

Goodput reached weekly average values of up to 97% in pre-training runs through optimizations targeting a lower job failure rate, minimized checkpointing overhead, and an overall reduction in the Mean Time to Restart (MTTR). MTTR includes the time from the last successful checkpoint before a training interruption plus the time taken to restart components of the system and resume steady-state training from the checkpoint. Techniques such as fully distributed optimizer-state and weight sharding, together with the elimination of all blocking overhead associated with checkpoint persistence, reduced checkpointing overhead to ~1 sec on H100 clusters and ~0.1 sec on TRN1 clusters. We exceeded our MTTR target of 9 minutes and achieved an average of 6.5 minutes on our TRN1 clusters by optimizing the node communication initialization in the training startup process and by reducing the time to load checkpoints through an asynchronous observer process. This process maps each latest checkpoint file to its corresponding node in the cluster; when resuming from the checkpoint, each node loads only the checkpoint files for its corresponding rank, reducing the time taken to discover the latest checkpoint from 3 minutes to 5 seconds. We also cache and reuse data indices to optimize training data loading initialization time. These improvements reduced data loading initialization to 205 ms per restart.
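The rank-sharded resume described above can be pictured with a small sketch. The file layout, pointer file, and helper below are hypothetical illustrations of the idea, not our production code.

```python
# Illustrative rank-sharded checkpoint resume: an out-of-band observer process
# is assumed to keep a "LATEST" pointer up to date, so each rank can jump
# straight to its own shard instead of scanning the checkpoint directory.
import os
import torch
import torch.distributed as dist

def resume_from_latest(ckpt_dir: str, model, optimizer) -> int:
    rank = dist.get_rank()
    with open(os.path.join(ckpt_dir, "LATEST")) as f:  # hypothetical pointer file
        step = int(f.read().strip())
    shard = os.path.join(ckpt_dir, f"step_{step}", f"rank_{rank}.pt")
    state = torch.load(shard, map_location="cpu")  # load only this rank's shard
    model.load_state_dict(state["model"])
    optimizer.load_state_dict(state["optimizer"])  # sharded optimizer state
    return step
```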
To increase training efficiency, we developed a new activation checkpointing scheme called Super-Selective Activation Checkpointing (SSC). SSC minimizes activation re-computation in memory-constrained environments, reducing memory consumption by ~50% while adding ~2% re-computation overhead compared to NVIDIA's selective checkpointing. We also found optimizations to the default gradient reduction behavior and the default PyTorch memory allocator behavior: the default gradient reduction leads to suboptimal communication overlap, and the synchronous nature of the default PyTorch allocation led to stragglers in collectives, resulting in multiple stalled workers. We adjusted the gradient reduction order and frequency, allowing us to overlap the majority of data-parallelism communication.

# References

[1] Efficient Batch Computing - AWS Batch - AWS, 2024. URL https://aws.amazon.com/batch/.
[2] Big Data Platform - Amazon EMR - AWS, 2024. URL https://aws.amazon.com/emr/.
[3] AgentStudio. Gemini flash. https://computer-agents.github.io/agent-studio/, 2024. Accessed: 2024-11-29.
[4] P. Agrawal, S. Antoniak, E. B. Hanna, B. Bout, D. Chaplot, J. Chudnovsky, D. Costa, B. D. Monicault, S. Garg, T. Gervet, S. Ghosh, A. Héliou, P. Jacob, A. Q. Jiang, K. Khandelwal, T. Lacroix, G. Lample, D. L. Casas, T. Lavril, T. L. Scao, A. Lo, W. Marshall, L. Martin, A. Mensch, P. Muddireddy, V. Nemychnikova, M. Pellat, P. V. Platen, N. Raghuraman, B. Rozière, A. Sablayrolles, L. Saulnier, R. Sauvestre, W. Shang, R. Soletskyi, L. Stewart, P. Stock, J. Studnia, S. Subramanian, S. Vaze, T. Wang, and S. Yang. Pixtral 12B, 2024. URL https://arxiv.org/abs/2410.07073.
[5] Amazon. Amazon joins Partnership on AI. https://www.aboutamazon.com/news/amazon-ai/amazon-joins-partnership-on-ai, 2016. Accessed: 2024-11-20.
[6] Amazon. Our commitment to the responsible use of AI. https://www.aboutamazon.com/news/company-news/amazon-responsible-ai, 2023. Accessed: 2024-11-20.
[7] Amazon. Amazon joins US Artificial Intelligence Safety Institute to advance responsible AI. https://www.aboutamazon.com/news/policy-news-views/amazon-joins-us-artificial-intelligence-safety-institute-to-advance-responsible-ai, 2024. Accessed: 2024-11-20.
[8] Amazon. Amazon SageMaker Clarify. https://aws.amazon.com/sagemaker/clarify/, 2024. Accessed: 2024-11-20.
[9] Amazon. Data protection & privacy at AWS. https://aws.amazon.com/compliance/data-protection/, 2024. Accessed: 2024-11-20.
[10] Amazon. Building AI responsibly at AWS. https://aws.amazon.com/ai/responsible-ai/, 2024. Accessed: 2024-11-20.
[11] Anthropic. The Claude 3 model family: Opus, Sonnet, Haiku. Technical report, Anthropic, 2023. URL https://www-cdn.anthropic.com/de8ba9b01c9ab7cbabf5c33b80b7bbc618857627/Model_Card_Claude_3.pdf.
[12] Anthropic. Claude Sonnet. https://www.anthropic.com/claude/sonnet, 2024. Accessed: 2024-11-20.
[13] Anthropic AI. Claude 3.5 Sonnet model card addendum. Technical report, 2024.
[14] Anthropic AI Team. Claude 3.5 Haiku and upgraded Claude 3.5 Sonnet, 2024. URL https://assets.anthropic.com/m/1cd9d098ac3e6467/original/Claude-3-Model-Card-October-Addendum.pdf.
[15] S. Arora and B. Barak. Computational complexity: a modern approach. Cambridge University Press, 2009.
[16] J. Betker, G. Goh, L. Jing, T. Brooks, J. Wang, L. Li, L. Ouyang, J. Zhuang, J. Lee, Y. Guo, et al. Improving image generation with better captions. Computer Science. https://cdn.openai.com/papers/dall-e-3.pdf, 2(3):8, 2023.
[17] Black Forest Labs.
Flux models. 2024. URL https://github.com/black-forest-labs/flux.
[18] T. B. Brown, B. Mann, N. Ryder, M. Subbiah, J. Kaplan, P. Dhariwal, A. Neelakantan, P. Shyam, G. Sastry, A. Askell, et al. Language models are few-shot learners. arXiv preprint arXiv:2005.14165, 2020.
[19] M. Chen, J. Tworek, H. Jun, Q. Yuan, H. P. D. O. Pinto, J. Kaplan, H. Edwards, Y. Burda, N. Joseph, G. Brockman, et al. Evaluating large language models trained on code, 2021.
[20] Z. Chen, W. Chen, C. Smiley, S. Shah, I. Borova, D. Langdon, R. N. Moussa, M. I. Beane, T.-H. K. Huang, B. R. Routledge, and W. Y. Wang. FinQA: A dataset of numerical reasoning over financial data. ArXiv, abs/2109.00122, 2021. URL https://api.semanticscholar.org/CorpusID:235399966.
[21] J. Cho, A. Zala, and M. Bansal. DALL-eval: Probing the reasoning skills and social biases of text-to-image generation models. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 3043-3054, 2023.
[22] P. Clark, I. Cowhey, O. Etzioni, T. Khot, A. Sabharwal, C. Schoenick, and O. Tafjord. Think you have solved question answering? Try ARC, the AI2 reasoning challenge. arXiv:1803.05457v1, 2018.
[23] K. Cobbe, V. Kosaraju, M. Bavarian, M. Chen, H. Jun, L. Kaiser, M. Plappert, J. Tworek, J. Hilton, R. Nakano, C. Hesse, and J. Schulman. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168, 2021.
[24] X. Deng, Y. Gu, B. Zheng, S. Chen, S. Stevens, B. Wang, H. Sun, and Y. Su. Mind2Web: Towards a generalist agent for the web. In NeurIPS, 2023.
[25] J. Dhamala, T. Sun, V. Kumar, S. Krishna, Y. Pruksachatkun, K.-W. Chang, and R. Gupta. BOLD: Dataset and metrics for measuring biases in open-ended language generation. In Proceedings of the 2021 ACM Conference on Fairness, Accountability, and Transparency, FAccT '21, pages 862-872, New York, NY, USA, 2021. Association for Computing Machinery. ISBN 9781450383097. doi: 10.1145/3442188.3445924. URL https://doi.org/10.1145/3442188.3445924.
[26] D. Dua, Y. Wang, P. Dasigi, G. Stanovsky, S. Singh, and M. Gardner. DROP: A reading comprehension benchmark requiring discrete reasoning over paragraphs. In Proc. of NAACL, 2019.
[27] P. Esser, S. Kulal, A. Blattmann, R. Entezari, J. Müller, H. Saini, Y. Levi, D. Lorenz, A. Sauer, F. Boesel, et al. Scaling rectified flow transformers for high-resolution image synthesis. In Forty-first International Conference on Machine Learning, 2024. URL https://huggingface.co/stabilityai/stable-diffusion-3-medium.
[28] P. Esser, S. Kulal, A. Blattmann, R. Entezari, J. Müller, H. Saini, Y. Levi, D. Lorenz, A. Sauer, F. Boesel, et al. Stable Diffusion 3.5. 2024. URL https://stability.ai/news/introducing-stable-diffusion-3-5.
[29] Frontier Model Forum. Amazon and Meta join the Frontier Model Forum to promote AI safety. https://www.frontiermodelforum.org/updates/amazon-and-meta-join-the-frontier-model-forum-to-promote-ai-safety/, 2024. Accessed: 2024-11-20.
[30] G7 Hiroshima Summit. Hiroshima process international code of conduct for organizations developing advanced AI systems. https://www.mofa.go.jp/files/100573473.pdf, 2023. Accessed: 2024-11-20.
[31] S. Gehman, S. Gururangan, M. Sap, Y. Choi, and N. A. Smith. RealToxicityPrompts: Evaluating neural toxic degeneration in language models. In T. Cohn, Y. He, and Y. Liu, editors, Findings of the Association for Computational Linguistics: EMNLP 2020, pages 3356-3369, Online, Nov. 2020. Association for Computational Linguistics. doi: 10.18653/v1/2020.findings-emnlp.301.
URL https://aclanthology.org/2020.findings-emnlp.301.
[32] Gemini Team. Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context, 2024. URL https://arxiv.org/abs/2403.05530.
[33] Google Deepmind. Gemini Flash. https://deepmind.google/technologies/gemini/flash/, 2024. Accessed: 2024-11-20.
[34] N. Goyal, C. Gao, V. Chaudhary, P.-J. Chen, G. Wenzek, D. Ju, S. Krishnan, M. Ranzato, F. Guzmán, and A. Fan. The FLORES-101 evaluation benchmark for low-resource and multilingual machine translation. 2021.
[35] F. Guzmán, P.-J. Chen, M. Ott, J. Pino, G. Lample, P. Koehn, V. Chaudhary, and M. Ranzato. Two new evaluation datasets for low-resource machine translation: Nepali-English and Sinhala-English. 2019.
[36] D. Hendrycks, C. Burns, S. Basart, A. Zou, M. Mazeika, D. Song, and J. Steinhardt. Measuring massive multitask language understanding. In International Conference on Learning Representations, 2021. URL https://openreview.net/forum?id=d7KBjmI3GmQ.
[37] D. Hendrycks, C. Burns, S. Kadavath, A. Arora, S. Basart, E. Tang, D. Song, and J. Steinhardt. Measuring mathematical problem solving with the MATH dataset. NeurIPS, 2021.
[38] Y. Hu, B. Liu, J. Kasai, Y. Wang, M. Ostendorf, R. Krishna, and N. A. Smith. TIFA: Accurate and interpretable text-to-image faithfulness evaluation with question answering. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 20406-20417, 2023.
[39] R. Islam and O. M. Moushi. GPT-4o: The cutting-edge advancement in multimodal LLM. Technical report, 2024.
[40] G. Kamradt. LLMTest_NeedleInAHaystack, 2023. URL https://github.com/gkamradt/LLMTest_NeedleInAHaystack/blob/main/README.md.
[41] D. P. Kingma. Auto-encoding variational Bayes. 2nd International Conference on Learning Representations, ICLR, 2014.
[42] T.-Y. Lin, M. Maire, S. Belongie, J. Hays, P. Perona, D. Ramanan, P. Dollár, and C. L. Zitnick. Microsoft COCO: Common objects in context. In Computer Vision-ECCV 2014: 13th European Conference, Zurich, Switzerland, September 6-12, 2014, Proceedings, Part V 13, pages 740-755. Springer, 2014.
[43] J. Liu, Y. Song, B. Y. Lin, W. Lam, G. Neubig, Y. Li, and X. Yue. VisualWebBench: How far have multimodal LLMs evolved in web page understanding and grounding?, 2024.
[44] X. Liu, Y. Zhu, J. Gu, Y. Lan, C. Yang, and Y. Qiao. MM-SafetyBench: A benchmark for safety evaluation of multimodal large language models. In A. Leonardis, E. Ricci, S. Roth, O. Russakovsky, T. Sattler, and G. Varol, editors, Computer Vision – ECCV 2024, pages 386–403, Cham, 2025. Springer Nature Switzerland. ISBN 978-3-031-72992-8.
[45] Llama Team, AI Meta. The Llama 3 herd of models, 2024. URL https://github.com/meta-llama/llama-models/blob/main/models/llama3_1/MODEL_CARD.md.
[46] P. Lu, B. Peng, H. Cheng, M. Galley, K.-W. Chang, Y. N. Wu, S.-C. Zhu, and J. Gao. Chameleon: Plug-and-play compositional reasoning with large language models. In The 37th Conference on Neural Information Processing Systems (NeurIPS), 2023.
[47] Luma Labs, 2024. URL https://lumalabs.ai/dream-machine.
[48] L. Madaan, A. K. Singh, R. Schaeffer, A. Poulton, S. Koyejo, P. Stenetorp, S. Narang, and D. Hupkes. Quantifying variance in evaluation benchmarks, 2024. URL https://arxiv.org/abs/2406.10229.
[49] K. Mangalam, R. Akshulakov, and J. Malik. EgoSchema: A diagnostic benchmark for very long-form video language understanding. In NeurIPS, 2023.
[50] A. Masry, D. X. Long, J. Q. Tan, S. Joty, and E. Hoque.
ChartQA: A benchmark for question answering about charts with visual and logical reasoning. In ACL Findings, 2022.
[51] M. Mathew, D. Karatzas, and C. Jawahar. DocVQA: A dataset for VQA on document images. In WACV, 2021.
[52] N. Mehrabi, P. Goyal, C. Dupuy, Q. Hu, S. Ghosh, R. Zemel, K.-W. Chang, A. Galstyan, and R. Gupta. FLIRT: Feedback loop in-context red teaming. In EMNLP 2024, 2024. URL https://www.amazon.science/publications/flirt-feedback-loop-in-context-red-teaming.
[53] Meta. Llama 3.2 Github model card vision. https://github.com/meta-llama/llama-models/blob/main/models/llama3_2/MODEL_CARD_VISION.md#instruction-tuned-models, 2024. Accessed: 2024-11-20.
[54] Y. Onoe, S. Rane, Z. Berger, Y. Bitton, J. Cho, R. Garg, A. Ku, Z. Parekh, J. Pont-Tuset, G. Tanzer, et al. DOCCI: Descriptions of connected and contrasting images. URL https://arxiv.org/abs/2404.19753.
[55] OpenAI. GPT-4o mini. https://openai.com/index/gpt-4o-mini-advancing-cost-efficient-intelligence, 2024. Accessed: 2024-11-20.
[56] OpenAI. Hello GPT-4o. https://openai.com/index/hello-gpt-4o, 2024. Accessed: 2024-11-20.
[57] OpenAI Team. simple-evals GPT-4, 2024. URL https://github.com/openai/simple-evals.
[58] OpenAI Team. o1-mini system card, 2024. URL https://cdn.openai.com/o1-system-card-20240917.pdf.
[59] L. Ouyang, J. Wu, X. Jiang, D. Almeida, C. Wainwright, P. Mishkin, C. Zhang, S. Agarwal, K. Slama, A. Ray, J. Schulman, J. Hilton, F. Kelton, L. Miller, M. Simens, A. Askell, P. Welinder, P. F. Christiano, J. Leike, and R. Lowe. Training language models to follow instructions with human feedback. In Advances in Neural Information Processing Systems, volume 35, pages 27730-27744, 2022.
[60] S. G. Patil, T. Zhang, X. Wang, and J. E. Gonzalez. Gorilla: Large language model connected with massive APIs, 2023. URL https://arxiv.org/abs/2305.15334.
[61] W. Peebles and S. Xie. Scalable diffusion models with transformers. In ICCV, 2023.
[62] R. Rafailov, A. Sharma, E. Mitchell, C. D. Manning, S. Ermon, and C. Finn. Direct preference optimization: Your language model is secretly a reward model. In Thirty-seventh Conference on Neural Information Processing Systems, 2023.
[63] R. Rei, J. G. C. de Souza, D. Alves, C. Zerva, A. C. Farinha, T. Glushkova, A. Lavie, L. Coheur, and A. F. T. Martins. COMET-22: Unbabel-IST 2022 submission for the metrics shared task. In P. Koehn, L. Barrault, O. Bojar, F. Bougares, R. Chatterjee, M. R. Costa-jussa, C. Federmann, M. Fishel, A. Fraser, M. Freitag, Y. Graham, R. Grundkiewicz, P. Guzman, B. Haddow, M. Huck, A. Jimeno Yepes, T. Kocmi, A. Martins, M. Morishita, C. Monz, M. Nagata, T. Nakazawa, M. Negri, A. Néveol, M. Neves, M. Popel, M. Turchi, and M. Zampieri, editors, Proceedings of the Seventh Conference on Machine Translation (WMT), pages 578–585, Abu Dhabi, United Arab Emirates (Hybrid), Dec. 2022. Association for Computational Linguistics. URL https://aclanthology.org/2022.wmt-1.52.
[64] D. Rein, B. L. Hou, A. C. Stickland, J. Petty, R. Y. Pang, J. Dirani, J. Michael, and S. R. Bowman. GPQA: A graduate-level Google-proof Q&A benchmark, 2023. URL https://arxiv.org/abs/2311.12022.
[65] Runway Research, 2024. URL https://runwayml.com/research/introducing-gen-3-alpha.
[66] C. Saharia, W. Chan, S. Saxena, L. Li, J. Whang, E. L. Denton, K. Ghasemipour, R. Gontijo Lopes, B. Karagol Ayan, T. Salimans, et al. Photorealistic text-to-image diffusion models with deep language understanding. Advances in Neural Information Processing Systems, 35:36479-36494, 2022.
[67] T. Schick, J.
Dwivedi-Yu, R. Dessi, R. Raileanu, M. Lomeli, E. Hambro, L. Zettlemoyer, N. Cancedda, and T. Scialom. Toolformer: Language models can teach themselves to use tools. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. URL https://openreview.net/forum?id=Yacmpz84TH.
[68] J. Schulman, F. Wolski, P. Dhariwal, A. Radford, and O. Klimov. Proximal policy optimization algorithms, 2017.
[69] U. Shaham, M. Ivgi, A. Efrat, J. Berant, and O. Levy. ZeroSCROLLS: A zero-shot benchmark for long text understanding. In H. Bouamor, J. Pino, and K. Bali, editors, Findings of the Association for Computational Linguistics: EMNLP 2023, pages 7977-7989, Singapore, Dec. 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.findings-emnlp.536. URL https://aclanthology.org/2023.findings-emnlp.536.
[70] A. Singh, V. Natarajan, M. Shah, Y. Jiang, X. Chen, D. Batra, D. Parikh, and M. Rohrbach. Towards VQA models that can read. In CVPR, 2019.
[71] K. Sun, K. Huang, X. Liu, Y. Wu, Z. Xu, Z. Li, and X. Liu. T2V-CompBench: A comprehensive benchmark for compositional text-to-video generation. arXiv preprint arXiv:2407.14505, 2024.
[72] M. Suzgun, N. Scales, N. Schärli, S. Gehrmann, Y. Tay, H. W. Chung, A. Chowdhery, Q. V. Le, E. H. Chi, D. Zhou, and J. Wei. Challenging BIG-Bench tasks and whether chain-of-thought can solve them. arXiv preprint arXiv:2210.09261, 2022.
[73] N. Team, M. R. Costa-jussa, J. Cross, O. Celebi, M. Elbayad, K. Heafield, K. Heffernan, E. Kalbassi, J. Lam, D. Licht, J. Maillard, A. Sun, S. Wang, G. Wenzek, A. Youngblood, B. Akula, L. Barrault, G. M. Gonzalez, P. Hansanti, J. Hoffman, S. Jarrett, K. R. Sadagopan, D. Rowe, S. Spruit, C. Tran, P. Andrews, N. F. Ayan, S. Bhosale, S. Edunov, A. Fan, C. Gao, V. Goswami, F. Guzmán, P. Koehn, A. Mourachko, C. Ropers, S. Saleem, H. Schwenk, and J. Wang. No language left behind: Scaling human-centered machine translation. 2022.
[74] A. Vaswani, N. Shazeer, N. Parmar, J. Uszkoreit, L. Jones, A. N. Gomez, L. Kaiser, and I. Polosukhin. Attention is all you need, 2023. URL https://arxiv.org/abs/1706.03762.
[75] R. Vedantam, C. L. Zitnick, and D. Parikh. CIDEr: Consensus-based Image Description Evaluation. In CVPR, 2015.
[76] A. Wang, R. Y. Pang, A. Chen, J. Phang, and S. R. Bowman. SQuALITY: Building a long-document summarization dataset the hard way. In Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pages 1139–1156, Abu Dhabi, United Arab Emirates, Dec. 2022. Association for Computational Linguistics. URL https://aclanthology.org/2022.emnlp-main.75.
[77] W. Wang, Z. He, W. Hong, Y. Cheng, X. Zhang, J. Qi, X. Gu, S. Huang, B. Xu, Y. Dong, et al. LVBench: An extreme long video understanding benchmark. arXiv preprint arXiv:2406.08035, 2024.
[78] X. Wang, J. Wu, J. Chen, L. Li, Y.-F. Wang, and W. Y. Wang. VATEX: A large-scale, high-quality multilingual dataset for video-and-language research. In ICCV, 2019.
[79] J. Wei, X. Wang, D. Schuurmans, M. Bosma, B. Ichter, F. Xia, E. H. Chi, Q. V. Le, and D. Zhou. Chain-of-thought prompting elicits reasoning in large language models. In Proceedings of the 36th International Conference on Neural Information Processing Systems, NIPS '22, Red Hook, NY, USA, 2024. Curran Associates Inc. ISBN 9781713871088.
[80] J. Xu, X. Liu, Y. Wu, Y. Tong, Q. Li, M. Ding, J. Tang, and Y. Dong. ImageReward: Learning and evaluating human preferences for text-to-image generation. Advances in Neural Information Processing Systems, 36, 2024.
[81] F.
Yan, H. Mao, C. C.-J. Ji, T. Zhang, S. G. Patil, I. Stoica, and J. E. Gonzalez. Berkeley function calling leaderboard. 2024. +[82] X. Yang, K. Sun, H. Xin, Y. Sun, N. Bhalla, X. Chen, S. Choudhary, R. D. Gui, Z. W. Jiang, Z. Jiang, L. Kong, B. Moran, J. Wang, Y. E. Xu, A. Yan, C. Yang, E. Yuan, H. Zha, N. Tang, L. Chen, N. Scheffer, Y. Liu, N. Shah, R. Wanga, A. Kumar, W.-t. Yih, and X. L. Dong. CRAG – comprehensive RAG benchmark. arXiv preprint arXiv:2406.04744, 2024. URL https://arxiv.org/abs/2406.04744. +[83] S. Yao, J. Zhao, D. Yu, N. Du, I. Shafran, K. Narasimhan, and Y. Cao. ReAct: Synergizing reasoning and acting in language models. In International Conference on Learning Representations (ICLR), 2023. +[84] J. Yu, Y. Xu, J. Y. Koh, T. Luong, G. Baid, Z. Wang, V. Vasudevan, A. Ku, Y. Yang, B. K. Ayan, et al. Scaling autoregressive models for content-rich text-to-image generation. arXiv preprint arXiv:2206.10789, 2(3):5, 2022. +[85] X. Yue, Y. Ni, K. Zhang, T. Zheng, R. Liu, G. Zhang, S. Stevens, D. Jiang, W. Ren, Y. Sun, C. Wei, B. Yu, R. Yuan, R. Sun, M. Yin, B. Zheng, Z. Yang, Y. Liu, W. Huang, H. Sun, Y. Su, and W. Chen. MMMU: A massive multi-discipline multimodal understanding and reasoning benchmark for expert AGI. In CVPR, 2024. +[86] B. Zheng, B. Gou, J. Kil, H. Sun, and Y. Su. GPT-4V(ision) is a generalist web agent, if grounded. In ICML, 2024. +[87] L. Zheng, Z. Huang, Z. Xue, X. Wang, B. An, and S. Yan. AgentStudio: A toolkit for building general virtual agents. arXiv preprint arXiv:2403.17918, 2024. +[88] M. Zhong, A. Zhang, X. Wang, R. Hou, W. Xiong, C. Zhu, Z. Chen, L. Tan, C. Bi, M. Lewis, S. Popuri, S. Narang, M. Kambadur, D. Mahajan, S. Edunov, J. Han, and L. van der Maaten. Law of the weakest link: Cross capabilities of large language models. arXiv preprint arXiv:2409.19951, 2024. +[89] J. Zhou, T. Lu, S. Mishra, S. Brahma, S. Basu, Y. Luan, D. Zhou, and L. Hou. Instruction-following evaluation for large language models, 2023. URL https://arxiv.org/abs/2311.07911. + +# A Amazon Nova Canvas Capabilities + +Our Nova Canvas model offers the following functionalities, with examples given in Figure 5 and an illustrative invocation sketch after this list. + +- Text-to-image generation allows customers to create images at various resolutions (from $512 \times 512$ up to $2\mathrm{K} \times 2\mathrm{K}$). +- Editing allows developers to edit images using a text prompt, a mask image, or both. Amazon Nova Canvas supports text-to-image editing and image-to-image editing, including inpainting, outpainting, and object removal. +- Image variation allows customers to generate images that preserve the content of user-provided images while introducing variations. +- Image conditioning allows customers to provide a reference image along with a text prompt, resulting in outputs that follow the layout and structure of the user-supplied reference. +- Image guidance with color palette allows customers to precisely control the color palette of generated images by providing a list of hex codes along with the text prompt. +- Background removal automatically removes the background from images containing multiple objects.
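+ +As a rough illustration of how these capabilities might be exercised programmatically, the sketch below invokes a text-to-image task through Amazon Bedrock's `invoke_model` API via `boto3`. The model ID and the request/response field names (`taskType`, `textToImageParams`, `imageGenerationConfig`, `images`) are assumptions based on Bedrock's image-generation request style, not a contract confirmed by this report; consult the Bedrock documentation for the exact schema. +
+```python
+import base64
+import json
+
+import boto3  # AWS SDK for Python; credentials and region are assumed to be configured
+
+# Assumption: Nova Canvas is served via Amazon Bedrock under this model ID.
+MODEL_ID = "amazon.nova-canvas-v1:0"
+
+client = boto3.client("bedrock-runtime", region_name="us-east-1")
+
+# Assumption: a Bedrock-style image-generation request body; editing,
+# variation, and conditioning would use other task types and parameters.
+request = {
+    "taskType": "TEXT_IMAGE",
+    "textToImageParams": {"text": "A dinosaur sitting in a tea cup"},
+    "imageGenerationConfig": {"numberOfImages": 1, "width": 1024, "height": 1024},
+}
+
+response = client.invoke_model(
+    modelId=MODEL_ID,
+    body=json.dumps(request),
+    accept="application/json",
+    contentType="application/json",
+)
+
+# Assumption: the response body is JSON with base64-encoded images under "images".
+payload = json.loads(response["body"].read())
+with open("dinosaur.png", "wb") as f:
+    f.write(base64.b64decode(payload["images"][0]))
+```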
+ +![](images/76e139eb48ebbe67c6ae23af9d987841e615fe510fc47192d81ab31c91976ad0.jpg) +![](images/b6eeeaf2872be27c90e24802e500673f2edd6c10373e7d61cccf86e50079d449.jpg) +![](images/a0f9f9877678b8c10c66531b68e095c59e1aa8c5547165a942d51f5903c8cfd2.jpg) +![](images/f067e42572ed86ce39b690f5084a986840fe5c7a607cd71a992dbb2c6eca7bd7.jpg) +![](images/c5cb0ef9a0a9732d58e0989d6f1191010a2c9f768a1103091663080bd660e849.jpg) +![](images/bd620d782bd91e54f3c9b6c828c79489b562e1ef53f3d5fefaf88840c93693bb.jpg) +![](images/9adcd93c3a348a977378541a3eb9005b49c67785526792f626c35ec61a8e75a6.jpg) +![](images/797ec36ddec755043f50d9c4c3a8db04f6feb57495ebcfb57ee80e75e8722356.jpg) +![](images/baa82e589e4f25edd6757b4df2d5b65509e507cf1609604b25a0fb5bbd27d127.jpg) +![](images/249480146ec018334d61af0070a064c6e4def2de70d75b161a1be51e5c5abf24.jpg) +![](images/f1232a213ee459c8d311a66e6cfa0b449d50930b4ecc8696660599b3d68a90b4.jpg) +![](images/21668492523acdf3cb014570bfb648a5eb6d1b6bd74d14386e769c83ee12b86c.jpg) +![](images/50ddd1488f51b07c178b9608ddaee633be7d742f40fcd2b07ebd87617516fae9.jpg) +![](images/a676fc75d5145c13756d1582514a2a8d24bb66faa47989ebc9fadd33faf862de.jpg) +![](images/1c1166d99bcb5b4b726864c65e11014aa1fe34f34def76dd67e8b8644c2f3a38.jpg) +Figure 5: Example capabilities of Amazon Nova Canvas, our content generation model for images. Panels: (a) image generation from a text prompt; (b) inpainting the image with swans; (c) image editing; (d) outpainting a new background; (e) style transfer; (f) guided generation; (g) controlling the color palette; (h) background removal. Example prompts shown include "A dinosaur sitting in a tea cup", "change flowers to orange color", "a hamster eats apple slice", "A wooden boat in summer", and "A jar of salad dressing in a rustic kitchen surrounded by fresh vegetables with studio lighting". + +# B Prompts and Scoring + +Prompt templates used for Amazon Nova evaluations are given below, along with those used for select other public models where noted. Additional materials and evaluation results from this report can be found at: + +https://huggingface.co/amazon-agi + +# B.1 Text evaluation + +# B.1.1 Language Understanding + +For MMLU: + +```txt +What is the correct answer to this question: +Choices: . Let's think step by step: +Based on the above, what is the single, most likely answer choice? Answer in the format "The correct answer is (insert answer here)." +``` + +For ARC-C: + +```txt +Given the following question and four candidate answers (A, B, C and D), choose the best answer. +Question: +Your response should end with "The best answer is [the_answer_letter]" where the [the_answer_letter] is one of A, B, C or D. +``` + +For DROP: + +We use the following 6 shots: + +```txt +- answer: >- According to the passage, the European Coal and Steel Community was established in 1951 and became the EEC in 1958. 1958 - 1951 = 7. So the answer is 7 +passage: >- Since the 1970s, U.S. governments have negotiated managed-trade agreements, such as the North American Free Trade Agreement in the 1990s, the Dominican Republic-Central America Free Trade Agreement in 2006, and a number of bilateral agreements. In Europe, six countries formed the European Coal and Steel Community in 1951 which became the European Economic Community in 1958. Two core objectives of the EEC were the development of a common market, subsequently renamed the single market, and establishing a customs union between its member states. question: How many years did the European Coal and Steel Community exist?
+- answer: >- According to the passage, 23.50% are under the age of 18 and 8.70% are ages 18 to 24. 23.5 - 8.7 = 14.8. So the answer is 14.8 +passage: >- In the county, the population was spread out with 23.50% under the age of 18, 8.70% from 18 to 24, and 13.30% from 25 to 44. +question: >- How many more percent are under the age of 18 compared to the 18 to 24 group? +- answer: >- According to the passage, Stafford threw 5 TD passes, 3 of which were to Johnson. 5 - 3 = 2. So the answer is 2 +passage: >- Playing in their second straight Thanksgiving game, the Eagles struggled especially on defense, where they were unable to stop the much-hyped Lions offense. The worst of it all was how unproven rookie Eric Rowe was tasked with covering wide receiver Calvin Johnson, leading to Johnson catching 3 touchdowns. Stafford's five passing touchdowns, including three of them to Johnson, was too much for the Eagles to overcome and for the second consecutive time this season, the Eagles gave up 45 points in a game. With the loss, the Eagles drop to 4-7 on the season and 6-1 when playing on Thanksgiving. +question: How many TD passes did Stafford throw other than to Johnson? +- answer: >- All the touchdown runs are: a 27-yard touchdown run, a 9-yard touchdown run, a 11-yard touchdown run. The smallest number among 27, 9, 11 is 9. So the shortest touchdown run was 9 yards. All the touchdown passes are: a 12-yard touchdown pass. So the longest touchdown pass was 12 yards. So the shortest touchdown run and the longest touchdown pass combine for 9 + 12 = 21 yards. So the answer is 21 +passage: >- The Seahawks played the San Francisco 49ers. In the first quarter, the Hawks RB Julius Jones got a 27-yard TD run, along with DT Craig Terrill returning a fumble 9 yards for a touchdown. In the third quarter, the 49ers almost rallied as RB H. J. Torres made a 12-yard TD pass to Lucas Nelly, along with Mare kicking a 32-yard field goal. In the final quarter, Julius Jones got another 11-yard TD. +question: >- How many yards do the shortest touchdown run and the longest touchdown pass combine for? +- answer: >- The Ravens kicker Billy Cundiff got a 45-yard field goal in the second quarter, concluding the first half with a 10-7 lead. So the Ravens had 10 points at halftime. So the answer is 10 +passage: >- The Steelers went home for a duel with the Baltimore Ravens. Pittsburgh would deliver the opening punch in the first quarter with a 1-yard touchdown from running back Rashard Mendenhall. The Ravens would make it even as running back Willis McGahee got a 9-yard TD. The Ravens kicker Billy Cundiff got a 45-yard field goal in the second quarter, concluding the first half with a 10-7 lead. The Steelers brought the game into overtime with a 38-yard field goal by Andrew Foster. The Ravens Billy Cundiff pulled off a winning 33-yard field goal in overtime. +question: How many points did the Ravens have at halftime? +- answer: >- The first and third quarters were the scoreless quarters. So there are 2 scoreless quarters. So the answer is 2 +passage: >- The Vikings flew to Bank of America Stadium to face the Carolina Panthers. After a scoreless first quarter, Carolina got on the board with quarterback Matt Moore finding fullback Brad Hoover on a 1-yard TD pass. After yet another scoreless quarter, Carolina sealed the game as Matt Moore completed a 42-yard touchdown pass to wide receiver Steve Smith. +question: How many scoreless quarters were there? +``` + +For each shot we provide the following instruction: + +Conclude your answer with: "So the answer is {final answer}".
Make sure the final answer is in plain text format + +And we create each user prompt as follows: + +```xml + +``` + +For IFEval: + +No particular prompt was added (the query was passed to the model as-is). + +For BBH: + +We use a preamble that describes the task, for example: + +Evaluate the result of a random Boolean expression. + +We then provide few-shot examples in the following format: + +```txt +<preamble> +Question: + +Let's think step by step. +. So the answer is +``` + +And we follow this with the query: + +```txt +<preamble> +Question: + +Let's think step by step. +``` + +For each subject, we provide the subject-specific instructions as below: + +```yaml +- subject: boolean_expressions + instruction: Conclude your answer with: "So the answer is True or False." +- subject: causal_judgement + instruction: Conclude your answer with: "So the answer is Yes or No." +- subject: date_understanding + instruction: Conclude your answer with: "So the answer is (answer_letter)." Where answer_letter is A, or B, or ... +- subject: disambiguation_qa + instruction: Conclude your answer with: "So the answer is (answer_letter)." Where answer_letter is A, or B, or ... +- subject: dyck_languages + instruction: Correctly close a Dyck-n word. Conclude your answer with: "So the answer is {final answer}.". Make sure the final answer is in plain text format +- subject: formal_fallacies + instruction: Conclude your answer with: "So the answer is valid or invalid." +- subject: geometric_shapes + instruction: Conclude your answer with: "So the answer is (answer_letter)." Where answer_letter is A, or B, or ... +- subject: hyperbaton + instruction: Conclude your answer with: "So the answer is (answer_letter)." Where answer_letter is A, or B, or ... +- subject: logical_deduction_five_objects + instruction: Conclude your answer with: "So the answer is (answer_letter)." Where answer_letter is A, or B, or ... +- subject: logical_deduction_seven_objects + instruction: Conclude your answer with: "So the answer is (answer_letter)." Where answer_letter is A, or B, or ... +- subject: logical_deduction_three_objects + instruction: Conclude your answer with: "So the answer is (answer_letter)." Where answer_letter is A, or B, or ... +- subject: movie_recommendation + instruction: Conclude your answer with: "So the answer is (answer_letter)." Where answer_letter is A, or B, or ... +- subject: multistep_arithmetic_two + instruction: Conclude your answer with: "So the answer is {final answer}.". Make sure the final answer is in plain text format +``` + +```yaml +- subject: navigate + instruction: Conclude your answer with: "So the answer is Yes or No". +- subject: object_counting + instruction: Conclude your answer with: "So the answer is {final answer}.". Where {final answer} is an integer +- subject: penguins_in_a_table + instruction: Conclude your answer with: "So the answer is (answer_letter)". Where answer_letter is A, or B, or ... +- subject: reasoning_about_colored_objects + instruction: Conclude your answer with: "So the answer is (answer_letter)". Where answer_letter is A, or B, or ... +- subject: ruin_names + instruction: Conclude your answer with: "So the answer is (answer_letter)". Where answer_letter is A, or B, or ... +- subject: salient_translation_error_detection + instruction: Conclude your answer with: "So the answer is (answer_letter)". Where answer_letter is A, or B, or ... +- subject: snarks + instruction: Conclude your answer with: "So the answer is (answer_letter)". Where answer_letter is A, or B, or ...
+- subject: sports_understanding + instruction: Conclude your answer with: "So the answer is yes or no". +- subject: temporal_sequences + instruction: Conclude your answer with: "So the answer is (answer_letter)". Where answer_letter is A, or B, or ... +- subject: tracking_shuffled_objects_five_objects + instruction: Conclude your answer with: "So the answer is (answer_letter)". Where answer_letter is A, or B, or ... +- subject: tracking_shuffled_objects_seven_objects + instruction: Conclude your answer with: "So the answer is (answer_letter)". Where answer_letter is A, or B, or ... +- subject: tracking_shuffled_objects_three_objects + instruction: Conclude your answer with: "So the answer is (answer_letter)". Where answer_letter is A, or B, or ... +- subject: web_of_lies + instruction: Conclude your answer with: "So the answer is Yes or No". +- subject: word_sorting + instruction: Conclude your answer with: "So the answer is word_1 word_2 ... word_n". +``` + +For GPQA: + +```txt +What is the correct answer to this question: +Choices: . Let's think step by step: +Based on the above, what is the single, most likely answer choice? Answer in the format "The correct answer is (insert answer here)." +``` + +# B.1.2 Mathematical Reasoning + +For MATH and GSM8K: + +```txt +Solve the following math problem step by step. Remember to put your answer inside \boxed{} +``` + +# B.1.3 Translation + +For Flores: + +Nova and Llama: + +Translate the following text into {tgt-lang}. Please output only the translated text with no prefix or introduction: {src} + +Gemini and GPT: + +Your job is to translate a sentence from {src-lang} into {tgt-lang}. Please output ONLY the translation and nothing else: {src} + +# B.1.4 Long Context + +For SQuALITY (ZeroSCROLLS benchmark), we use the standard prompt template for Amazon Nova and Gemini models as in [69]: + +You are given a story and a question. Answer the question in a paragraph. + +Story: + + + +Question: + + + +Answer: + +# B.2 Multimodal evaluation + +# B.2.1 MMMU + +For multiple-choice questions: + +```txt +With the image, the following question, and the four possible answers (A, B, C and D), select the correct answer. + +(A) (B) ... (X) + +- For clear-cut questions: Give the answer directly with minimal elaboration. +- For complex questions: Adopt this step-by-step method: +## Step 1: [Concise description] +[Brief explanation] +## Step 2: [Concise description] +[Brief explanation] + +In every scenario, conclude with: The best answer is [the_answer_letter]. where [the_answer_letter] is one of A, B, C or D. Let's proceed with a systematic approach +``` + +For open-ended questions: + +With the image and the following question, provide a correct answer. + +- For clear-cut questions: Give the answer directly with minimal elaboration. +- For complex questions: Adopt this step-by-step method: + +Step 1: [Concise description] + +[Brief explanation] + +Step 2: [Concise description] + +[Brief explanation] + +In every scenario, conclude with: The best answer is [the_answer_phrase]. where [the_answer_phrase] is a concise and direct answer to the question. Let's proceed with a systematic approach. + +# B.2.2 ChartQA, DocVQA, and TextVQA + +Answer the question using a single word or phrase. + +# B.2.3 VATEX + +Render a clear and concise one-sentence summary of the video. The summary should be at least 10 words but no more than 20 words. Analyze the video first before summarizing it. Do not hallucinate objects.
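+ +These multimodal templates are fixed strings with slots for the question and answer options. As a purely illustrative sketch (the helper name and the example inputs below are hypothetical, not part of the evaluation code), the B.2.1 multiple-choice template shown above could be assembled like this: +
+```python
+# Hypothetical assembly of the B.2.1 multiple-choice prompt; only the
+# template wording is taken from this appendix, the helper is illustrative.
+MC_TEMPLATE = """With the image, the following question, and the four possible answers (A, B, C and D), select the correct answer.
+{question}
+{options}
+- For clear-cut questions: Give the answer directly with minimal elaboration.
+- For complex questions: Adopt this step-by-step method:
+## Step 1: [Concise description]
+[Brief explanation]
+## Step 2: [Concise description]
+[Brief explanation]
+In every scenario, conclude with: The best answer is [the_answer_letter]. where [the_answer_letter] is one of A, B, C or D. Let's proceed with a systematic approach"""
+
+
+def build_mc_prompt(question: str, options: list[str]) -> str:
+    """Render one MMMU-style item into the fixed template above."""
+    rendered = "\n".join(f"({chr(ord('A') + i)}) {opt}" for i, opt in enumerate(options))
+    return MC_TEMPLATE.format(question=question, options=rendered)
+
+
+# Example with made-up inputs:
+print(build_mc_prompt("What shape is shown in the image?",
+                      ["circle", "square", "triangle", "hexagon"]))
+```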
+ +# B.2.4 EgoSchema + +You will be given a question about a video and three possible answer options. You will be provided frames from the video, sampled evenly across the video. + +(A) +(B) +(C) + +Answer with the option's letter from the given choices directly. + +# B.2.5 VisualWebBench + +For the web captioning task: + +You are given a screenshot of a webpage. Please generate the meta web description information of this webpage, i.e., the content attribute in the HTML element. + +You should use this format, and do not output any explanation or any other contents: + +For the heading OCR task: + +You are given a screenshot of a webpage. Please generate the main text within the screenshot, which can be regarded as the heading of the webpage. + +You should directly tell me the first sentence of the main content, and do not output any explanation or any other contents. + +For the web QA task: + +You should directly tell me your answer in the fewest words possible, and do not output any explanation or any other contents. + +For the element OCR task: + +You are given a screenshot of a webpage with a red rectangle bounding box. The [x1, y1, x2, y2] coordinates of the bounding box are . + +Please perform OCR in the bounding box and recognize the text content within the red bounding box. + +For the action prediction task: + +You are given a screenshot of a webpage with a red rectangle bounding box. The [x1, y1, x2, y2] coordinates of the bounding box are . + +Please select the best webpage description that matches the new webpage after clicking the selected element in the bounding box: + +You should directly tell me your choice in a single uppercase letter, and do not output any explanation or any other contents. + +For the element grounding task: + +In this website screenshot, I have labeled IDs for some HTML elements as candidates. Tell me which one best matches the description: + +You should directly tell me your choice in a single uppercase letter, and do not output any explanation or any other contents. + +For the action grounding task: + +In this website screenshot, I have labeled IDs for some HTML elements as candidates. Tell me which one I should click to complete the following task: + +You should directly tell me your choice in a single uppercase letter, and do not output any explanation or any other contents. + +# B.2.6 MM-Mind2Web + +Imagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click on an element with the mouse, select an option, type text, or press Enter with the keyboard. (For your understanding, they are like the click(), select_option(), and type() functions in Playwright, respectively.) One next step means one operation among the three. + +You are asked to complete the following task: + +Previous Actions: + + + +The screenshot below shows the webpage you see. + +Follow the following guidance to think step by step before outlining the next action step at the current stage: + +(Current Webpage Identification) + +Firstly, think about what the current webpage is. + +(Previous Action Analysis) + +Secondly, combined with the screenshot, analyze each step of the previous action history and their intention one by one.
Particularly, pay more attention to the last step, which may be more related to what you should do now as the next step. + +# (Screenshot Details Analysis) + +Closely examine the screenshot to check the status of every part of the webpage to understand what you can operate with and what has been set or completed. You should closely examine the screenshot details to see what steps have been completed by previous actions even though you are given the textual previous actions. Because the textual history may not clearly and sufficiently record some effects of previous actions, you should closely evaluate the status of every part of the webpage to understand what you have done. + +# (Next Action Based on Webpage and Analysis) + +Then, based on your analysis, in conjunction with human web browsing habits and the logic of web design, decide on the following action. And clearly outline which element in the webpage users will operate with as the first next target element, its detailed location, and the corresponding operation. + +To be successful, it is important to follow the following rules: + +1. You should only issue a valid action given the current observation. +2. You should only issue one action at a time. + +# (Reiteration) + +First, reiterate your next target element, its detailed location, and the corresponding operation. + +# (Multichoice Question) + +Below is a multi-choice question, where the choices are elements in the webpage. From the screenshot, find out where and what each one is on the webpage. Then, determine whether one matches your target element. Please examine the choices one by one. Choose the matching one. If multiple options match your answer, choose the most likely one by reexamining the screenshot, the choices, and your further reasoning. + +If none of these elements match your target element, please select the choice "None of the other options match the correct element." + +(Final Answer) Finally, conclude your answer using the format below. Ensure your answer is strictly adhering to the format provided below. Please do not leave any explanation in your answers of the final standardized format part, and this final part should be clear and certain. The element choice, action, and value should be in three separate lines. + +# Format: + +ELEMENT: The uppercase letter of your choice. + +ACTION: Choose an action from {CLICK, TYPE, SELECT, NONE}. Use NONE only if you choose option F for the ELEMENT. + +VALUE: Provide additional input based on ACTION. + +# The VALUE means: + +If ACTION == TYPE, specify the text to be typed. + +If ACTION == SELECT, specify the option to be chosen. + +If ACTION == CLICK, write "None". + +# B.2.7 GroundUI-1K + +Which action should I do if I want to click on , and where is the action? Express the location coordinates using the (x1, y1, x2, y2) format, scaled between 0 and 1000. + +# B.3 Functional Capabilities + +# B.3.1 FinQA + +Given the following finance question, analyze the question in detail step-by-step before giving the final answer. Your answer should begin with "Lets think step-by-step". Your response should end with "The answer is [the_final_answer]", where [the_final_answer] should be the most concise answer without any explanation.
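+ +The paragraph after this sketch describes how FinQA answers are extracted and normalized for scoring. As a minimal illustration of that procedure (the function names, the magnitude table, and the rounding parameter below are hypothetical, not the evaluation code used for this report): +
+```python
+import re
+
+# Hypothetical magnitude words -> multipliers, mirroring the normalization
+# described in the text (e.g. "5.2 millions" -> 5,200,000).
+MAGNITUDES = {"thousand": 1e3, "thousands": 1e3,
+              "million": 1e6, "millions": 1e6,
+              "billion": 1e9, "billions": 1e9}
+
+def extract_answer(response: str) -> str:
+    """Pull the final answer out of a 'The answer is ...' response."""
+    match = re.search(r"The answer is (.*)", response)
+    return match.group(1).strip().rstrip(".") if match else ""
+
+def normalize(answer: str) -> float | None:
+    """Convert '1.3%' -> 0.013 and '5.2 millions' -> 5200000.0."""
+    text = answer.replace(",", "").replace("$", "").strip()
+    scale = 1.0
+    if text.endswith("%"):
+        text, scale = text[:-1].strip(), 0.01
+    parts = text.split()
+    if len(parts) == 2 and parts[1].lower() in MAGNITUDES:
+        text, scale = parts[0], MAGNITUDES[parts[1].lower()]
+    try:
+        return float(text) * scale
+    except ValueError:
+        return None
+
+def is_correct(predicted: str, gold: str, places: int = 2) -> bool:
+    """Compare prediction and ground truth after rounding to the same places."""
+    p, g = normalize(extract_answer(predicted)), normalize(gold)
+    if p is None or g is None:
+        return extract_answer(predicted) == gold
+    return round(p, places) == round(g, places)
+
+# Example: is_correct("... The answer is 1.3%", "0.013") -> True
+```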
+ +We use the regex "The answer is (.*)" to extract the answer. We convert answers with percent signs and magnitude terms to decimal numerical representation (e.g., $1.3\%$ is converted to 0.013 and "5.2 millions" to 5,200,000). An answer is correct if it is identical to the ground truth when rounded to the same decimal places. + +# B.3.2 RAG + +```txt +You are a teacher grading a quiz. +You are given a question, the student's answer, and the true answer, and are asked to score the student answer as either Correct or Incorrect. +Example Format: +QUESTION: question here +STUDENT ANSWER: student's answer here +TRUE ANSWER: true answer here +GRADE: Correct or Incorrect here +Grade the student answers based ONLY on their factual accuracy. Ignore differences in punctuation and phrasing between the student answer and true answer. It is OK if the student answer contains more information than the true answer, as long as it does not contain any conflicting statements. Begin! +QUESTION: {query} +STUDENT ANSWER: {answer} +TRUE ANSWER: {expected_answer} +GRADE: +Your response should be in json format as follows: +{{ "justification": (Without mentioning the student/teacher framing of this prompt, explain why the STUDENT ANSWER is Correct or Incorrect. Use one or two sentences maximum. Keep the answer as concise as possible.), "grade": (correct or incorrect) +}} +``` + +# C Qualitative examples of multimodal intelligence + +![](images/b6fee84e3dec1c634c54caa6be6cd5718dbb5c0e2717596731f7713ff1f6cffc.jpg) + +# User input + +This looks like an amazing site. What is it and where is it located? Are there other fun things around this site to explore? + +![](images/40ed9725a10bde768a14e4c23a1bbeaa48fa7bb2624851ec2ecc717d04925b21.jpg) +Figure 6: Created with Nova Pro. Photo taken by a team member. + +![](images/8a0926b7ab13709954d88c169fa4b58825725dd4d4f17f64bb0c74906113cf3a.jpg) + +# Response + +This is the Space Needle, a landmark in Seattle, Washington, USA. It is a well-known observation tower and a symbol of Seattle. It was built for the 1962 World's Fair and has since become a popular tourist attraction. The Space Needle offers panoramic views of the city, the surrounding mountains, and the Puget Sound. + +The Space Needle is located in the Seattle Center, which is a cultural and entertainment hub. The Seattle Center is home to many other attractions, including the Chihuly Garden and Glass, the Museum of Pop Culture, and the Pacific Science Center. The Seattle Center also hosts events like concerts, festivals, and art exhibitions. The Seattle Center is also close to other popular attractions in Seattle, such as the Pike Place Market, the Seattle Aquarium, and the Seattle Art Museum. The city is known for its vibrant arts and culture scene, outdoor activities, and beautiful natural surroundings.
+ +# User input: "Search for Psych and buy the first episode" + +# Agent step 1 + +![](images/f62b7b8f0fa10516c22fcbbd7182f1adbd2b48c22b345d2038ebbd505fb4475e.jpg) + +# Agent step 2 + +![](images/40017c96e300bbe4f19d2bff766d3a5a22bc1aac134344ab06f81425b38ce48d.jpg) + +# Agent step 3 + +![](images/e2b9071e056e37ac4eb9d430bd3f923d778aeea4fa88dcd30d9470cf968999bb.jpg) +Figure 7: Example of a multimodal agent. + +# Agent step 4 + +![](images/b32c411bb61018a8cbea165020cca783de3eec6926bb4ae55ba05b11185084fc.jpg) + +![](images/d0909d37150b6e506c3322688439c181301d0021cd13c9fc9c174a56e6668fa5.jpg) + +# User input + +What happened to my tire? What should I do? + +![](images/ac9cea2d55627a6fb2b6475d2b322f234e90b3b7231e196768db56042240a551.jpg) +Figure 8: Created with Nova Pro. Image source: [88]. + +![](images/4182d1ad86670b1b607652228cf65789d3f9bb027a78f59a88228ade9b455e2d.jpg) + +# Response + +It appears that your tire has a significant puncture, as evidenced by the deflated state and visible damage. Here's what you should do: + +1. Assess the Damage: Confirm the puncture by checking for any visible holes or debris embedded in the tire. +2. Check Tire Pressure: Ensure the tire is completely deflated. +3. Remove the Tire: If you're comfortable, you can remove the wheel to inspect the puncture more closely. +4. Temporary Fix: Use a tire repair kit to patch the hole if it's small. This is a temporary solution. +5. Professional Repair: Visit a tire shop for a proper repair or replacement. They can assess if the tire can be patched or if a new tire is needed. +6. Inspect Other Tires: Check the other tires for any signs of damage or wear. + +Would you like more detailed instructions on any of these steps? + +![](images/11c83a4fc91e912143be2cb657581d8025a865186a75371afd4135014166cf8f.jpg) + +# User input + +Group this sales data by month using pandas and then create a bar chart of the sales per month. + +
| Month | Sales |
| --- | --- |
| Jan | 1523.13 |
| Jan | 957 |
| Feb | 133 |
| March | 452 |
| May | 521 |
| June | 901 |
| July | 1334 |
| July | 131 |
| Aug | 2601 |
| Dec | 3005 |
+ +![](images/14520d6be03d8ce44c8e422a387e8ae19c3963854102147343ff7ee1426a2348.jpg) +Generated plot: + +![](images/aa02909025c79a2e3bc10826fb45b862b4bcc1dd1005746e68c4bf07a0a94712.jpg) + +# Response + +```python +import pandas as pd +import matplotlib.pyplot as plt + +# Data +data = { +    'Month': ['Jan', 'Jan', 'Feb', 'March', 'May', 'June', 'July', 'July', 'Aug', 'Dec'], +    'Sales': [1523.13, 957, 133, 452, 521, 901, 1334, 131, 2601, 3005] +} + +# Create DataFrame +df = pd.DataFrame(data) + +# Group by month and sum sales +monthly_sales = df.groupby('Month')['Sales'].sum().reset_index() + +# Sort months based on calendar order +monthly_sales['Month'] = pd.Categorical( +    monthly_sales['Month'], +    categories=['Jan', 'Feb', 'March', 'April', 'May', 'June', 'July', 'Aug', 'Sept', 'Oct', 'Nov', 'Dec'], +    ordered=True +) +monthly_sales = monthly_sales.sort_values('Month') + +# Plot bar chart +plt.figure(figsize=(10, 6)) +plt.bar(monthly_sales['Month'], monthly_sales['Sales'], color='blue') +plt.xlabel('Month') +plt.ylabel('Total Sales') +plt.title('Total Sales per Month') +plt.show() +``` + +![](images/ca75d134a16e3155d8778b1af440402be2f8402314bc6cd2b56a644be95182cb.jpg) + +# User input + +How many people say the European Union responded well to the pandemic? + +# Poor marks for America's response to coronavirus outbreak + +% who say each has done a good job dealing with the coronavirus outbreak + +![](images/e537c2f1ffe8ae9930b85ad5babd789496b6b51b9653a1c9bf7e807992b5257a.jpg) +Figure 10: Created with Nova Lite. Image source: ChartQA dataset [50]. + +Note: Percentages are medians based on 13 countries. Those who did not answer are not shown. Source: Summer 2020 Global Attitudes Survey. Q10a-e. U.S. Image Plummets Internationally as Most Say Country Has Handled Coronavirus Badly + +PEW RESEARCH CENTER + +![](images/ac91511bde96ff7f67aab4256d93ae2e68faf1e7ea3de954ee8f09249bf14491.jpg) +Figure 9: Created with Nova Pro. + +# Response + +The chart shows that $57\%$ of people say the European Union responded well to the pandemic. This is indicated by the green bar labeled "Good" for the EU, which extends to a height of $57\%$. This suggests that a majority of the people surveyed had a positive view of the EU's response to the pandemic. + +# D Correspondence and Contributors + +Please direct all correspondence to: + +nova-technical-report@amazon.com + +The Nova family of models was built by the Amazon Artificial General Intelligence (AGI) organization and partner teams. + +When citing this report, please use "Amazon AGI" as the sole author, as shown in the bibtex entry below. + +```bib +@misc{novatechreport, +  author = {Amazon AGI}, +  title = {The Amazon Nova Family of Models: Technical Report and Model Card}, +  year = {2024}, +  url = {https://www.amazon.science/publications/the-amazon-nova-family-of-models-technical-report-and-model-card} +} +``` + +# D.1 Contributors + +The following individuals worked on the Nova program for at least one-fifth of its duration and measurably impacted one or more of the models or services described in this report.
+ +Aaron Langford + +Aayush Shah + +Abhanshu Gupta + +Abhimanyu Bhatter + +Abhinav Goyal + +Abhinav Mathur + +Abhinav Mohanty + +Abhishek Kumar + +Abhishek Sethi + +Abi Komma + +Abner Pena + +Achin Jain + +Adam Kunysz + +Adam Opyrchal + +Adarsh Singh + +Aditya Rawal + +Adok Achar Budihal Prasad + +Adrià de Gispert + +Agnika Kumar + +Aishwarya Aryamane + +Ajay Nair + +Akilan M + +Akshaya Iyengar + +Akshaya Vishnu Kudlu Shanbhogue + +Alan He + +Alessandra Cervone + +Alex Loeb + +Alex Zhang + +Alexander Fu + +Alexander Lisnichenko + +Alexander Zhipa + +Alexandros Potamianos + +Ali Kebarighotbi + +Aliakbar Daronkolaei + +Alok Parmesh + +Amanjot Kaur Samra + +Ameen Khan + +Amer Rez + +Amir Saffari + +Amit Agarwalla + +Amit Jhindal + +Amith Mamidala + +Ammar Asmro + +Amulya Ballakur + +Anand Mishra + +Anand Sridharan + +Anastasiia Dubinina + +Andre Lenz + +Andreas Doerr + +Andrew Keating + +Andrew Leaver + +Andrew Smith + +Andrew Wirth + +Andy Davey + +Andy Rosenbaum + +Andy Sohn + +Angela Chan + +Aniket Chakrabarti + +Anil Ramakrishna + +Anirban Roy + +Anita Iyer + +Anjali Narayan-Chen + +Ankith Yennu + +Anna Dabrowska + +Anna Gawlowska + +Anna Rumshisky + +Anna Turek + +Anoop Deoras + +Anton Bezruchkin + +Anup Prasad + +Anupam Dewan + +Anwith Kiran + +Apoory Gupta + +Aram Galstyan + +Aravind Manoharan + +Arijit Biswas + +Arindam Mandal + +Arpit Gupta + +Arsamkhan Pathan + +Arun Nagarajan + +Arushan Rajasekaram + +Arvind Sundararajan + +Ashwin Ganesan + +Ashwin Swaminathan + +Athanasios Mouchtaris + +Audrey Champeau + +Avik Ray + +Ayush Jaiswal + +Ayush Sharma + +Bailey Keefer + +Balamurugan Muthiah + +Beatrix Leon-Millan + +Ben Koopman + +Ben Li + +Benjamin Biggs + +Benjamin Ott + +Bhanu Vinzamuri + +Bharath Venkatesh + +Bhavana Ganesh + +Bhoomit Vasani + +Bill Byrne + +Bill Hsu + +Bincheng Wang + +Blake King + +Blazej Gorny + +Bo Feng + +Bo Zheng + +Bodhisattwa Paul + +Bofan Sun + +Bofeng Luo + +Bowen Chen + +Bowen Xie + +Boya Yu + +Brendan Jugan + +Brett Panosh + +Brian Collins + +Brian Thompson + +Can Karakus + +Can Liu + +Carl Lambrecht + +Carly Lin + +Carolyn Wang + +Carrie Yuan + +Casey Loyda + +Cezary Walczak + +Chalapathi Choppa + +Chandana Satya Prakash + +Chankrisna Richy Meas + +Charith Peris + +Charles Recaido + +Charlie Xu + +Charul Sharma + +Chase Kernan + +Chayut Thanapirom + +Chengwei Su + +Chenhao Xu + +Chenhao Yin + +Chentao Ye + +Chenyang Tao + +Chethan Parameshwara + +Ching-Yun Chang + +Chong Li + +Chris Hench + +Chris Tran + +Christophe Dupuy + +Christopher Davis + +Christopher DiPersio + +Christos Christodoulopoulos + +Christy Li + +Chun Chen + +Claudio Delli Bovi + +Clement Chung + +Cole Hawkins + +Connor Harris + +Corey Ropell + +Cynthia He + +DK Joo + +Dae Yon Hwang + +Dan Rosen + +Daniel Elkind + +Daniel Pressel + +Daniel Zhang + +Danielle Kimball + +Daniil Sorokin + +Dave Goodell + +Davide Modolo + +Dawei Zhu + +Deepikaa Suresh + +Deepti Raga + +Denis Filimonov + +Denis Foo Kune + +Denis Romasanta Rodriguez + +Devamanyu Hazarika + +Dhananjay Ram + +Dhawal Parkar + +Dhawal Patel + +Dhwanil Desai + +Dinesh Singh Rajput + +Disha Sule + +Diwakar Singh + +Dmitriy Genzel + +Dolly Goldenberg + +Dongyi He + +Dumitru Hanciu + +Dushan Tharmal + +Dzmitry Siankovich + +Edi Cikovic + +Edwin Abraham + +Ekraam Sabir + +Elliott Olson + +Emmett Steven + +Emre Barut + +Eric Jackson + +Ethan Wu + +Evelyn Chen + +Ezhilan Mahalingam + +Fabian Triefenbach + +Fan Yang + +Fangyu Liu + +Fanzi Wu + +Faraz Tavakoli + +Farhad Khozeimeh + +Feiyang Niu + +Felix Hieber + +Feng Li + +First Elbey + 
+Florian Krebs + +Florian Saupe + +Florian Sprunken + +Frank Fan + +Furqan Khan + +Gabriela De Vincenzo + +Gagandeep Kang + +George Ding + +George He + +George Yeung + +Ghada Qaddoumi + +Giannis Karamanolakis + +Goeric Huybrechts + +Gokul Maddali + +Gonzalo Iglesias + +Gordon McShane + +Gozde Sahin + +Guangtai Huang + +Gukyeong Kwon + +Gunnar A. Sigurdsson + +Gurpreet Chadha + +Gururaj Kosuru + +Hagen Fuerstenau + +Hah Hah + +Haja Maideen + +Hajime Hosokawa + +Han Liu + +Han-Kai Hsu + +Hann Wang + +Hao Li + +Hao Yang + +Haofeng Zhu + +Haozheng Fan + +Harman Singh + +Harshavardhan Kaluvala + +Hashim Saeed + +He Xie + +Helian Feng + +Hendrix + +Hengzhi Pei + +Henrik Nielsen + +Hesam Ilati + +Himanshu Patel + +Hongshan Li + +Hongzhou Lin + +Hussain Raza + +Ian Cullinan + +Imre Kiss + +Inbarasan Thangamani + +Indrayani Fadnavis + +Ionut Teodor Sorodoc + +Irem Ertuerk + +Iryna Yemialyanava + +Ishan Soni + +Ismail Jelal + +Ivan Tse + +Jack FitzGerald + +Jack Zhao + +Jackson Rothgeb + +Jacky Lee + +Jake Jung + +Jakub Debski + +Jakub Tomczak + +James Jeun + +James Sanders + +Jason Crowley + +Jay Lee + +Jayakrishna Anvesh Paidy + +Jayant Tiwari + +Jean Farmer + +Jeff Solinsky + +Jenna Lau + +Jeremy Savareese + +Jerzy Zagorski + +Ji Dai + +Jiacheng (JC) Gu + +Jiahui Li + +Jian (Skyler) Zheng + +Jianhua Lu + +Jianhua Wang + +Jiawei Dai + +Jiawei Mo + +Jiaxi Xu + +Jie Liang + +Jie Yang + +Jim Logan + +Jimit Majmudar + +Jing Liu + +Jinghong Miao + +Jingru Yi + +Jingyang Jin + +Jiun-Yu Kao + +Jixuan Wang + +Jiyang Wang + +Joe Pemberton + +Joel Carlson + +Joey Blundell + +John Chin-Jew + +John He + +Jonathan Ho + +Jonathan Hueser + +Jonathan Lunt + +Jooyoung Lee + +Joshua Tan + +Joyjit Chatterjee + +Judith Gaspers + +Jue Wang + +Jun Fang + +Jun Tang + +Jun Wan + +Jun Wu + +Junlei Wang + +Junyi Shi + +Justin Chiu + +Justin Satriano + +Justin Yee + +Jwala Dhamala + +Jyoti Bansal + +Kai Zhen + +Kai-Wei Chang + +Kaixiang Lin + +Kalyan Raman + +Kanthashree Mysore Sathyendra + +Karabo Moroe + +Karan Bhandarkar + +Karan Kothari + +Karolina Owczarzak + +Karthick Gopalswamy + +Karthick Ravi + +Karthik Ramakrishnan + +Karthika Arumugam + +Kartik Mehta + +Katarzyna Konczalska + +Kavya Ravikumar + +Ke Tran + +Kochen Qin + +Kelin Li + +Kelvin Li + +Ketan Kulkarni + +Kevin Angelo Rodrigues + +Keyur Patel + +Khadige Abboud + +Kiana Hajebi + +Klaus Reiter + +Kris Schultz + +Krishna Anisetty + +Krishna Kotnana + +Kristen Li + +Kruthi Channamallikarjuna + +Krzysztof Jakubczyk + +Kuba Pierewoj + +Kunal Pal + +Kunwar Srivastav + +Kyle Bannerman + +Lahari Poddar + +Lakshmi Prasad + +Larry Tseng + +Laxmikant Naik + +Leena Chennuru Vankadara + +Lenon Minorics + +Leo Liu + +Leonard Lausen + +Leonardo F. R. 
Ribeiro + +Li Zhang + +Lili Gehorsam + +Ling Qi + +Lisa Bauer + +Lori Knapp + +Lu Zeng + +Lucas Tong + +Lulu Wong + +Luoxin Chen + +Maciej Rudnicki + +Mahdi Namazifar + +Mahesh Jaliminche + +Maira Ladeira Tanke + +Manasi Gupta + +Mandeep Ahlawat + +Mani Khanuja + +Mani Sundaram + +Marcin Leyk + +Mariusz Momotko + +Markus Boese + +Markus Dreyer + +Markus Mueller + +Mason Fu + +Mateusz Górski + +Mateusz Mastalerczyk + +Matias Mora + +Matt Johnson + +Matt Scott + +Matthew Wen + +Max Barysau + +Maya Bouerdassi + +Maya Krishnan + +Mayank Gupta + +Mayank Hirani + +Mayank Kulkarni + +Meganathan Narayanasamy + +Melanie Bradford + +Melanie Gens + +Melissa Burke + +Meng Jin + +Miao Chen + +Michael Denkowski + +Michael Heymel + +Michael Krestyaninov + +Michal Obirek + +Michalina Wichorowska + +Michal Miotk + +Milosz Watroba + +Mingyi Hong + +Mingzhi Yu + +Miranda Liu + +Mohamed Gouda + +Mohammad El-Shabani + +Mohammad Ghavamzadeh + +Mohit Bansal + +Morteza Ziyadi + +Nan Xia + +Nathan Susanj + +Nav Bhasin + +Neha Goswami + +Nehal Belgamwar + +Nicolas Anastassacos + +Nicolas Bergeron + +Nidhi Jain + +Nihal Jain + +Niharika Chopparapu + +Nik Xu + +Nikko Strom + +Nikolaos Malandrakis + +Nimisha Mishra + +Ninad Parkhi + +Ninareh Mehrabi + +Nishita Sant + +Nishtha Gupta + +Nitesh Sekhar + +Nithin Rajeev + +Nithish Raja Chidambaram + +Nitish Dhar + +Noor Bhagwagar + +Noy Konforty + +Omar Babu + +Omid Razavi + +Orchid Majumder + +Osama Dar + +Oscar Hsu + +Pablo Kvitca + +Pallavi Pandey + +Parker Seegmiller + +Patrick Lange + +Paul Ferraro + +Payal Motwani + +Pegah Kharazmi + +Pei Wang + +Pengfei Liu + +Peter Bradtke + +Peter Gotoz + +Peter Zhou + +Pichao Wang + +Piotr Poskart + +Pooja Sonawane + +Pradeep Natarajan + +Pradyun Ramadorai + +Pralam Shah + +Prasad Nirantar + +Prasanthi Chavali + +Prashan Wanigasekara + +Prashant Saraf + +Prashun Dey + +Pratyush Pant + +Prerak Pradhan + +Preyaa Patel + +Priyanka Dadlani + +Prudhvee Narasimha Sadha + +Qi Dong + +Qian Hu + +Qiaozi (QZ) Gao + +Qing Liu + +Quinn Lam + +Quynh Do + +R. 
Manmatha + +Rachel Willis + +Rafael Liu + +Rafal Ellert + +Rafal Kalinski + +Rafi Al Attrach + +Ragha Prasad + +Ragini Prasad + +Raguvir Kunani + +Rahul Gupta + +Rahul Sharma + +Rahul Tewari + +Rajaganesh Baskaran + +Rajan Singh + +Rajiv Gupta + +Rajiv Reddy + +Rajshekhar Das + +Rakesh Chada + +Rakesh Vaideeswaran Mahesh + +Ram Chandrasekaran + +Ramesh Nallapati + +Ran Xue + +Rashmi Gangadharaiah + +Ravi Rachakonda + +Renxian Zhang + +Rexhina Blloshmi + +Rishabh Agrawal + +Robert Enyedi + +Robert Lowe + +Robik Shrestha + +Robinson Piramuthu + +Rohail Asad + +Rohan Khanna + +Rohan Mukherjee + +Rohit Mittal + +Rohit Prasad + +Rohith Mysore Vijaya Kumar + +Ron Diamant + +Ruchita Gupta + +Ruiwen Li + +Ruoying Li + +RushabhFegade + +Ruxu Zhang + +Ryan Arbow + +Ryan Chen + +Ryan Gabbard + +Ryan Hoium + +Ryan King + +Sabarishkumar Iyer + +Sachal Malick + +Sahar Movaghati + +Sai Balakavi + +Sai Jakka + +Sai Kashyap Paruvelli + +Sai Muralidhar Jayanthi + +Saicharan Shriram Mujumdar + +Sainyam Kapoor + +Sajjad Beygi + +Saket Dingliwal + +Saleh Soltan + +Sam Ricklin + +Sam Tucker + +Sameer Sinha + +Samridhi Choudhary + +Samson Tan + +Samuel Broscheit + +Samuel Schulter + +Sanchit Agarwal + +Sandeep Atluri + +Sander Valstar + +Sanjana Shankar + +Sanyukta Sanyukta + +Sarthak Khanna + +Sarvpriye Khetrapal + +Satish Janakiraman + +Saumil Shah + +Saurabh Akolkar + +Saurabh Giri + +Saurabh Khandelwal + +Saurabh Pawar + +Saurabh Sahu + +Sean Huang + +Sejun Ra + +Senthilkumar Gopal + +Sergei Dobroshinsky + +Shadi Saba + +Shamik Roy + +Shamit Lal + +Shankar Ananthakrishnan + +Sharon Li + +Shashwat Srijan + +Shekhar Bhide + +Sheng Long Tang + +Sheng Zha + +Sheree Oraby + +Sherif Mostafa + +Shiqi Li + +Shishir Bharathi + +ShivamPrakash + +Shiyuan Huang + +Shreya Yembarwar + +Shreyas Pansare + +Shreyas Subramanian + +Shrijeet Joshi + +Shuai Liu + +Shuai Tang + +Shubham Chandak + +Shubham Garg + +Shubham Katiyar + +Shubham Mehta + +Shubham Srivastav + +Shuo Yang + +Siddalingesha D S + +Siddharth Choudhary + +Siddharth Singh Senger + +Simon Babb + +Sina Moeini + +Siqi Deng + +Siva Loganathan + +Slawomir Domagala + +Sneha Narkar + +Sneha Wadhwa + +Songyang Zhang + +Songyao Jiang + +Sony Trenous + +Soumajyoti Sarkar + +Soumya Saha + +Sourabh Reddy + +Sourav Dokania + +Spurthideepika Sandiri + +Spyros Matsoukas + +Sravan Bodapati + +Sri Harsha Reddy Wdaru + +Sridevi Yagati Venkateshdatta + +Srikanth Ronanki + +Srinivasan R Veeravanallur + +Sriram Venkatapathy + +Sriramprabhu Sankaraguru + +Sruthi Gorantla + +Sruthi Karuturi + +Stefan Schroedl + +Subendhu Rongali + +Subbasis Kundu + +Suhaila Shakiah + +Sukriti Tiwari + +Sumit Bharti + +Sumita Sami + +Sumith Mathew + +Sunny Yu + +Sunwoo Kim + +Suraj Bajirao Malode + +Susana Cumplido Riel + +Swapnil Palod + +Swastik Roy + +Syed Furqhan + +Tagyoung Chung + +Takuma Yoshitani + +Taojiannan Yang + +Tejaswi Chillakura + +Tejwant Bajwa + +Temi Lajumoke + +Thanh Tran + +Thomas Gueudre + +Thomas Jung + +Tianhui Li + +Tim Seemman + +Timothy Leffel + +Tingting Xiang + +Tirth Patel + +Tobias Domhan + +Tobias Falke + +Toby Guo + +Tom Li + +Tomasz Horsczaruk + +Tomasz Jedynak + +Tushar Kulkarni + +Tyst Marin + +Tytus Metrycki + +Tzu-Yen Wang + +Umang Jain + +Upendra Singh + +Utkarsh Chirimar + +Vaibhav Gupta + +Vanshil Shah + +Varad Deshpande + +Varad Gunjal + +Varsha Srikeshava + +Varsha Vivek + +Varun Bharadwaj + +Varun Gangal + +Varun Kumar + +Venkatesh Elango + +Vicente Ordonez + +Victor Soto + +Vignesh Radhakrishnan + +Vihang Patel + +Vikram Singh + +Vinay Varma Kolanuvada + 
+Vinayshekhar Bannihatti Kumar + +Vincent Auvray + +Vincent Cartillier + +Vincent Ponzo + +Violet Peng + +Vishal Khandelwal + +Vishal Naik + +Vishvesh Sahasrabudhe + +Vitaliy Korolev + +Vivek Gokuladas + +Vivek Madan + +Vivek Subramanian + +Volkan Cevher + +Vrinda Gupta + +Wael Hamza + +Wei Zhang + +Weitong Ruan + +Weiwei Cheng + +Wen Zhang + +Wenbo Zhao + +Wenyan Yao + +Wenzhuo Ouyang + +Wesley Dashner + +William Campbell + +William Lin + +Willian Martin + +Wyatt Pearson + +Xiang Jiang + +Xiangxing Lu + +Xiangyang Shi + +Xianwen Peng + +Xiaofeng Gao + +Xiaoge Jiang + +Xiaohan Fei + +Xiaohui Wang + +Xiaozhou Joey Zhou + +Xin Feng + +Xinyan Zhao + +Xinyao Wang + +Xinyu Li + +Xu Zhang + +Xuan Wang + +Xuandi Fu + +Xueling Yuan + +Xuning Wang + +Yadunandana Rao + +Yair Tavizon + +Yan Rossiytsev + +Yanbei Chen + +Yang Liu + +Yang Zou + +Yangsook Park + +Yannick Versley + +Yanyan Zhang + +Yash Patel + +Yen-Cheng Lu + +Yi Pan + +Yi-Hsiang (Sean) Lai + +Yichen Hu + +Yida Wang + +Yiheng Zhou + +Yilin Xiang + +Ying Shi + +Ying Wang + +Yishai Galatzer + +Yongxin Wang + +Yorick Shen + +Yuchen Sun + +Yudi Purwatama + +Yue (Rex) Wu + +Yue Gu + +Yuechun Wang + +Yujun Zeng + +Yuncong Chen + +Yunke Zhou + +Yusheng Xie + +Yvon Guy + +Zbigniew Ambrozinski + +Zhaowei Cai + +Zhen Zhang + +Zheng Wang + +Zhenghui Jin + +Zhewei Zhao + +Zhiheng Li + +Zhiheng Luo + +Zhikang Zhang + +Zhilin Fang + +Zhiqi Bu + +Zhiyuan Wang + +Zhizhong Li + +Zijian Wang + +Zimeng (Chris) Qiu + +Zishi Li + +# D.2 Acknowledgements + +We would like to acknowledge the following individuals who supported the development of the Nova models and services during the Nova program. + +Abdelrahman Badawy + +Abtin Rasoulian + +Adam Baranowski + +Aishwarya Kore + +Aishwarya Padmakumar + +Alain Krok + +Alex Mould + +Alex Sun + +Alexandros Papangelis + +
Alfred Shen
Amaran Asokkumar
Amiya Chakraborty
Anastasios Alexandridis
Angeliki Metallinou
Anila Joshi
Anup Katariya
Arda Keskiner
Avinash Venkatagiri
Aya Elzoheiry
Baishali Chaudhury
Ben Friebe
Bigad Soleiman
Bob Li
Brad Porter
Brian Chou
Brian Yost
Burak Gozluklu
Chad Connally
Chris Azer
Chris Beauchene
Chris Greenwood
Chris Johnson
Clay Cheng
Craig Rowland
Di Jin
Di Wu
Diego Socolinsky
Don Kretsch
Dylan Martin
Emma Lister
Eva Lasarcyk
Evan Kravitz
Federico D'Alessio
Flora Wang
Francisco Calderon Rodriguez
Gamaleldin Elsayed
Gaurav Rele
Gaurav Sukhatme
Gourav Datta
Hadrien Glaude
Hanbo Wang
Hans Hoeijmaker
Haotian An
Harpreet Cheema
Harshit Pande
Hongbin Zheng
Huda Khayrallah
+ +
Isaac Privitera
Jacob Zhiyuan Fang
Jady Liu
Jae Oh Woo
Jamal Saboune
James Park
Jianbo Yuan
Jianwei Feng
Jie Li
Jinwoo Park
Johan Esbjourner
Jonathan Makunga
JoonHyung Kim
Jorge Beltran
Jose Garrido Ramas
Julie Baca
Justin Lewis
Kamran Razi
Kangyan Liu
Kasana Mahesh
Kelvin Qian
Kyle Goehner
Kyle Saggar
Laith Al-Saadoon
Lei Sun
Lily Liao
Long Chen
Lukacs Ablonczy
Luke Luneau
Maciej Eichler
Mallory McManamo
Manju Arakere
Matt McCoy
Matthew Chang
Meghal Varia
Meghana Ashok
Melanie Li
Mifu Suzuki
Negin Sokhandan
Nick Biso
Nico Bishop
Nicolle Borges
Palash Goyal
Parker Coleman
Paul Sumarokov
Pavel Kveton
Philipp Lerche
Pratibha Kumari
+ +
Rahul Agarwal
Rahul Ghosh
Rahul Kulkarni
Raj Kumar
Ramana Keerthi
Rams Sundaram
Raymond Fang
Reethika Kesani
Ryan Razkenari
Sarath Krishnan
Scott Patten
Seokhwan Kim
Sepehr Eghbali
Sergey Pugachev
Sertan Alkan
Shailav Taneja
Sheamus Punch
Shikib Mehri
Shilpa Singh
Shraddha Ravishankar
Sijia Liu
Sitanshu Gupta
Sol Vesdapunt
Spencer Romo
Sravya Uppu
Srivani Kambhampati
Stephanie Xie
Sujitha Martin
Sungjin Lee
Sungmin Hong
Tanner McRae
Thomas Patterson
Tina Li
Tom Liang
Trong Nguyen
Vasudev Mahesh Purandare
Vidya Sagar Ravipati
Vu San Ha Huynh
Weijuan Wu
Xiaolong Li
Xinyi Xu
Yaroslav Nechaev
Yuan Tian
Yunfei Bai
Zach Hille
Ziyan Tian
\ No newline at end of file diff --git a/data/2025/2506_12xxx/2506.12103/images/0cbd85126bd687b64f349529061c5f9e6d085266463731bb8ec5d4319a9c86c7.jpg b/data/2025/2506_12xxx/2506.12103/images/0cbd85126bd687b64f349529061c5f9e6d085266463731bb8ec5d4319a9c86c7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cb0cc60b5a692b07341ee6bde907f24905b343b2 --- /dev/null +++ b/data/2025/2506_12xxx/2506.12103/images/0cbd85126bd687b64f349529061c5f9e6d085266463731bb8ec5d4319a9c86c7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fc6b2cc7fa3e54b97f849960ea03c6fc1077c005b52021d0aee434bd6fe23111 +size 26239 diff --git a/data/2025/2506_12xxx/2506.12103/images/104d1d3b30677a61aa977e3bfb470ed7db16f34aecc77a8a5a3e3f519f29ea89.jpg b/data/2025/2506_12xxx/2506.12103/images/104d1d3b30677a61aa977e3bfb470ed7db16f34aecc77a8a5a3e3f519f29ea89.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b5576ceaab7e202205fbadab0b1c0e79a8f02153 --- /dev/null +++ b/data/2025/2506_12xxx/2506.12103/images/104d1d3b30677a61aa977e3bfb470ed7db16f34aecc77a8a5a3e3f519f29ea89.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2aaab6100d1ec83a3c464e4306abf5d2ad86b0f0012389433f6565340ab5ec52 +size 89337 diff --git a/data/2025/2506_12xxx/2506.12103/images/11c83a4fc91e912143be2cb657581d8025a865186a75371afd4135014166cf8f.jpg b/data/2025/2506_12xxx/2506.12103/images/11c83a4fc91e912143be2cb657581d8025a865186a75371afd4135014166cf8f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..691bfdf0d39402861dd44e555332bc0e0dbda71a --- /dev/null +++ b/data/2025/2506_12xxx/2506.12103/images/11c83a4fc91e912143be2cb657581d8025a865186a75371afd4135014166cf8f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:79897d2cd370ab2754a0d49e309cdfe707b284f668a9a8b95d528a78817fcf7e +size 1014 diff --git a/data/2025/2506_12xxx/2506.12103/images/13929e9d9dc2fc9d6064a3e199a3df08286691b00bd5e5e3a8118591ab3ce293.jpg b/data/2025/2506_12xxx/2506.12103/images/13929e9d9dc2fc9d6064a3e199a3df08286691b00bd5e5e3a8118591ab3ce293.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ded2fe20fa2d947e250be9c2016be9000e8ea9c9 --- /dev/null +++ b/data/2025/2506_12xxx/2506.12103/images/13929e9d9dc2fc9d6064a3e199a3df08286691b00bd5e5e3a8118591ab3ce293.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b3a35661cab9abf727d9c93a42a703f4353454a364f80798715c2427caa64519 +size 196510 diff --git a/data/2025/2506_12xxx/2506.12103/images/14520d6be03d8ce44c8e422a387e8ae19c3963854102147343ff7ee1426a2348.jpg b/data/2025/2506_12xxx/2506.12103/images/14520d6be03d8ce44c8e422a387e8ae19c3963854102147343ff7ee1426a2348.jpg new file mode 100644 index 0000000000000000000000000000000000000000..89177c63315a571090ec0efa0d4f15bf77f41625 --- /dev/null +++ b/data/2025/2506_12xxx/2506.12103/images/14520d6be03d8ce44c8e422a387e8ae19c3963854102147343ff7ee1426a2348.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7aecd78b8ced5f05494aacd384fdc4bc994694f8c21f7aa9e563ac87604310ce +size 14282 diff --git a/data/2025/2506_12xxx/2506.12103/images/1888da7922d07148b8348987b53dfc4837e982a06f12992d09585bdf6e01d4e8.jpg b/data/2025/2506_12xxx/2506.12103/images/1888da7922d07148b8348987b53dfc4837e982a06f12992d09585bdf6e01d4e8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2f4dc2f2e69b9c981aea41354e4d67c3655273e9 --- /dev/null +++ 
b/data/2025/2506_12xxx/2506.12103/images/1888da7922d07148b8348987b53dfc4837e982a06f12992d09585bdf6e01d4e8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5f8bb27645b93c8afdee1a71a794cdb5ed8ebd0c2b0bcb266ca3be8c1ecb962d +size 16260 diff --git a/data/2025/2506_12xxx/2506.12103/images/1911bf0acd67bd15d0f2b53ab74ed3db72d3ea068b54888f5c0238b981b3c475.jpg b/data/2025/2506_12xxx/2506.12103/images/1911bf0acd67bd15d0f2b53ab74ed3db72d3ea068b54888f5c0238b981b3c475.jpg new file mode 100644 index 0000000000000000000000000000000000000000..af4048ef20f92b76338615bce7092ae5395aafe2 --- /dev/null +++ b/data/2025/2506_12xxx/2506.12103/images/1911bf0acd67bd15d0f2b53ab74ed3db72d3ea068b54888f5c0238b981b3c475.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d11036436e43a56e68acf665df4a0a022fa74f7333cc22fed28cefa7844726c4 +size 30521 diff --git a/data/2025/2506_12xxx/2506.12103/images/1c1166d99bcb5b4b726864c65e11014aa1fe34f34def76dd67e8b8644c2f3a38.jpg b/data/2025/2506_12xxx/2506.12103/images/1c1166d99bcb5b4b726864c65e11014aa1fe34f34def76dd67e8b8644c2f3a38.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0156f443f6392ddcaedcdcb73229f4075f081ddd --- /dev/null +++ b/data/2025/2506_12xxx/2506.12103/images/1c1166d99bcb5b4b726864c65e11014aa1fe34f34def76dd67e8b8644c2f3a38.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:30e24357ccab2aa42721643a993e90757fc5f42a94054e65841e51885cad1e29 +size 8182 diff --git a/data/2025/2506_12xxx/2506.12103/images/21668492523acdf3cb014570bfb648a5eb6d1b6bd74d14386e769c83ee12b86c.jpg b/data/2025/2506_12xxx/2506.12103/images/21668492523acdf3cb014570bfb648a5eb6d1b6bd74d14386e769c83ee12b86c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..444b19842b7570c2d2023bf7d6a0a0a813aa8aca --- /dev/null +++ b/data/2025/2506_12xxx/2506.12103/images/21668492523acdf3cb014570bfb648a5eb6d1b6bd74d14386e769c83ee12b86c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6d1288d37104ccfb6550f6e07da10d7e44b56f7b602342a95997f9c98bf6fa77 +size 1637 diff --git a/data/2025/2506_12xxx/2506.12103/images/249480146ec018334d61af0070a064c6e4def2de70d75b161a1be51e5c5abf24.jpg b/data/2025/2506_12xxx/2506.12103/images/249480146ec018334d61af0070a064c6e4def2de70d75b161a1be51e5c5abf24.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fdc8e3c1cccdba881c47a5c13fa044f649b6ed3f --- /dev/null +++ b/data/2025/2506_12xxx/2506.12103/images/249480146ec018334d61af0070a064c6e4def2de70d75b161a1be51e5c5abf24.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:233833a6ff213eded5fded648e3c411c22fea579accce2f7470f9fa32dc1c59f +size 19433 diff --git a/data/2025/2506_12xxx/2506.12103/images/24cd5ce105f84b663074971e3ad6ce5928eb157a204bca249b33984a69b84712.jpg b/data/2025/2506_12xxx/2506.12103/images/24cd5ce105f84b663074971e3ad6ce5928eb157a204bca249b33984a69b84712.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8f641673a66dfd1240fba175daafbaebe1a02b0d --- /dev/null +++ b/data/2025/2506_12xxx/2506.12103/images/24cd5ce105f84b663074971e3ad6ce5928eb157a204bca249b33984a69b84712.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:973b99de44f47af9408606ec87b091e2365016a2f68e7ac66cd77cd35b6e2fdb +size 36392 diff --git a/data/2025/2506_12xxx/2506.12103/images/3156a9077f1c972bfe8d4f5736cc7cb801a543c0a7e1872ae7041bb75bf072ce.jpg 
b/data/2025/2506_12xxx/2506.12103/images/3156a9077f1c972bfe8d4f5736cc7cb801a543c0a7e1872ae7041bb75bf072ce.jpg new file mode 100644 index 0000000000000000000000000000000000000000..30104c8f0334cc31c8b1937dcf922fdb738da031 --- /dev/null +++ b/data/2025/2506_12xxx/2506.12103/images/3156a9077f1c972bfe8d4f5736cc7cb801a543c0a7e1872ae7041bb75bf072ce.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:37d06f217a6f9feaa6f2063973d0a523b9114424265fdf87030251f5200c0320 +size 41109 diff --git a/data/2025/2506_12xxx/2506.12103/images/3812b6443d8505c899375917bc3608643f3f56d03d8761c7851c7a38dc3c85ce.jpg b/data/2025/2506_12xxx/2506.12103/images/3812b6443d8505c899375917bc3608643f3f56d03d8761c7851c7a38dc3c85ce.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cea708e570f7960d6ccba2a212f81cac2b335a9f --- /dev/null +++ b/data/2025/2506_12xxx/2506.12103/images/3812b6443d8505c899375917bc3608643f3f56d03d8761c7851c7a38dc3c85ce.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:760cde31a758a6411f56216673187cedb24fb160961b189c64c2021025af4f93 +size 52475 diff --git a/data/2025/2506_12xxx/2506.12103/images/40017c96e300bbe4f19d2bff766d3a5a22bc1aac134344ab06f81425b38ce48d.jpg b/data/2025/2506_12xxx/2506.12103/images/40017c96e300bbe4f19d2bff766d3a5a22bc1aac134344ab06f81425b38ce48d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bb53a688d9f4f79082f6450d9e8940d73a8d9525 --- /dev/null +++ b/data/2025/2506_12xxx/2506.12103/images/40017c96e300bbe4f19d2bff766d3a5a22bc1aac134344ab06f81425b38ce48d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9d74232a16b2a1c2bc22a68d306437c328831db21cd8da81fc876194dd5716cb +size 48761 diff --git a/data/2025/2506_12xxx/2506.12103/images/40ed9725a10bde768a14e4c23a1bbeaa48fa7bb2624851ec2ecc717d04925b21.jpg b/data/2025/2506_12xxx/2506.12103/images/40ed9725a10bde768a14e4c23a1bbeaa48fa7bb2624851ec2ecc717d04925b21.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c4982742158bf48da37777ffe716e1a0fb07f97f --- /dev/null +++ b/data/2025/2506_12xxx/2506.12103/images/40ed9725a10bde768a14e4c23a1bbeaa48fa7bb2624851ec2ecc717d04925b21.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7036d6a7e88006a5b586264015052c7027979f0ffce5c158efce09a8bf2d69cb +size 43369 diff --git a/data/2025/2506_12xxx/2506.12103/images/4182d1ad86670b1b607652228cf65789d3f9bb027a78f59a88228ade9b455e2d.jpg b/data/2025/2506_12xxx/2506.12103/images/4182d1ad86670b1b607652228cf65789d3f9bb027a78f59a88228ade9b455e2d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e98339e59dde0d01b34eebbfed9690f80e7a39cf --- /dev/null +++ b/data/2025/2506_12xxx/2506.12103/images/4182d1ad86670b1b607652228cf65789d3f9bb027a78f59a88228ade9b455e2d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:65dcc8ac499bf915fefde1a3b9adf87688177848579aea798143e3730d0360e2 +size 1032 diff --git a/data/2025/2506_12xxx/2506.12103/images/45df84ea256037b222c86788c17751b23921927fe22cd2beade0beab39a07079.jpg b/data/2025/2506_12xxx/2506.12103/images/45df84ea256037b222c86788c17751b23921927fe22cd2beade0beab39a07079.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9775252b554484a21bb60c34fbcac1467b383bf4 --- /dev/null +++ b/data/2025/2506_12xxx/2506.12103/images/45df84ea256037b222c86788c17751b23921927fe22cd2beade0beab39a07079.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:34d05f23940e793068478a9f6fca62ba119976a22be40cca777749b55708acc3 +size 83276 diff --git a/data/2025/2506_12xxx/2506.12103/images/460ae686ce3b421bcd1418395cc5925da64a59db4ea8972139858cf04c0e9636.jpg b/data/2025/2506_12xxx/2506.12103/images/460ae686ce3b421bcd1418395cc5925da64a59db4ea8972139858cf04c0e9636.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1c2916cfab89019cf3e563473947c4bb6e33ee38 --- /dev/null +++ b/data/2025/2506_12xxx/2506.12103/images/460ae686ce3b421bcd1418395cc5925da64a59db4ea8972139858cf04c0e9636.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1baf251e29545376ca185f11595d12e325d78a3a2325eea573e34b2ca35227af +size 30842 diff --git a/data/2025/2506_12xxx/2506.12103/images/4754f9755877defa3dbe1fd9cd96eb21940d26e1151935c60631550c46e02dc5.jpg b/data/2025/2506_12xxx/2506.12103/images/4754f9755877defa3dbe1fd9cd96eb21940d26e1151935c60631550c46e02dc5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2ead648750dfdd0b6e578c7e20b30296b1e69fed --- /dev/null +++ b/data/2025/2506_12xxx/2506.12103/images/4754f9755877defa3dbe1fd9cd96eb21940d26e1151935c60631550c46e02dc5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:606c5df2c154a7604fe1683f87c15ba06e2396f8eca53e99689228c7a57b7234 +size 58484 diff --git a/data/2025/2506_12xxx/2506.12103/images/4925649280fa45f14562e7b6baa58f4947d95e95c004bfe6d7380dc0f925a1f9.jpg b/data/2025/2506_12xxx/2506.12103/images/4925649280fa45f14562e7b6baa58f4947d95e95c004bfe6d7380dc0f925a1f9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..365e6e6ca7c81c2bdca7bd05f71433f680f5e29c --- /dev/null +++ b/data/2025/2506_12xxx/2506.12103/images/4925649280fa45f14562e7b6baa58f4947d95e95c004bfe6d7380dc0f925a1f9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f24ff7de544647970c2e4dbd23a0a7590379522de1353302bca7610c7b82dd7c +size 106285 diff --git a/data/2025/2506_12xxx/2506.12103/images/499a56f552f88cdf2d3fa5cc1b35e5ff796ec6798d845b6bc78b4780986c4b50.jpg b/data/2025/2506_12xxx/2506.12103/images/499a56f552f88cdf2d3fa5cc1b35e5ff796ec6798d845b6bc78b4780986c4b50.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f6ae8eb50bc93b9db2f676cf0924c039dee25bf2 --- /dev/null +++ b/data/2025/2506_12xxx/2506.12103/images/499a56f552f88cdf2d3fa5cc1b35e5ff796ec6798d845b6bc78b4780986c4b50.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7f2b339d006634e48eaf1e7f4068d2bd2a1046690279d7a3c61f23b685ab5a12 +size 110495 diff --git a/data/2025/2506_12xxx/2506.12103/images/4c9b6e696a10b010149f3053995de7bcfebf5ebbcc7d50857945ce6d01b19f02.jpg b/data/2025/2506_12xxx/2506.12103/images/4c9b6e696a10b010149f3053995de7bcfebf5ebbcc7d50857945ce6d01b19f02.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8f9f8c6d6ad561aed5b5323cc90f3cae8d29f1c8 --- /dev/null +++ b/data/2025/2506_12xxx/2506.12103/images/4c9b6e696a10b010149f3053995de7bcfebf5ebbcc7d50857945ce6d01b19f02.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a9eea40ae3bebe9a825f180854e0b30945d5ce65d9cba76567c53fa989c9b7dc +size 115569 diff --git a/data/2025/2506_12xxx/2506.12103/images/50ddd1488f51b07c178b9608ddaee633be7d742f40fcd2b07ebd87617516fae9.jpg b/data/2025/2506_12xxx/2506.12103/images/50ddd1488f51b07c178b9608ddaee633be7d742f40fcd2b07ebd87617516fae9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3d238d40cbc580ebf65f3cf32bbfe0d6c8c599a5 --- /dev/null +++ 
b/data/2025/2506_12xxx/2506.12103/images/50ddd1488f51b07c178b9608ddaee633be7d742f40fcd2b07ebd87617516fae9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1829f9f7d665b83a69f8eb9935885f5b4494b24df9514a576cc34298fd11247d +size 15879 diff --git a/data/2025/2506_12xxx/2506.12103/images/55f6921dfdfd6300177137ccb747563b22ab1a28a8d7fd0686e6d8523cfcaf2e.jpg b/data/2025/2506_12xxx/2506.12103/images/55f6921dfdfd6300177137ccb747563b22ab1a28a8d7fd0686e6d8523cfcaf2e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..240802af95e4f3de4bf28cc4eac8361bd2e7bc27 --- /dev/null +++ b/data/2025/2506_12xxx/2506.12103/images/55f6921dfdfd6300177137ccb747563b22ab1a28a8d7fd0686e6d8523cfcaf2e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:42015ad92059050bae5270355811fecb754ee02a25a10816f4cf0124365d2a9f +size 88183 diff --git a/data/2025/2506_12xxx/2506.12103/images/62810bb4a4f0c1a6ec5b253cc0bdfe5416772d4bd04d5e56463fb15a82e82c78.jpg b/data/2025/2506_12xxx/2506.12103/images/62810bb4a4f0c1a6ec5b253cc0bdfe5416772d4bd04d5e56463fb15a82e82c78.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8a88d5228126c2295ab61eaef282d5a2afa024e0 --- /dev/null +++ b/data/2025/2506_12xxx/2506.12103/images/62810bb4a4f0c1a6ec5b253cc0bdfe5416772d4bd04d5e56463fb15a82e82c78.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a16a952cf24531772255768466348f507f5fe3efa000c66e0d531f3b8d10d090 +size 23347 diff --git a/data/2025/2506_12xxx/2506.12103/images/6eb562a9d279dd55a2a329e5996f1b4fa88c3aa97c915eefdfe3c4fad694eb2a.jpg b/data/2025/2506_12xxx/2506.12103/images/6eb562a9d279dd55a2a329e5996f1b4fa88c3aa97c915eefdfe3c4fad694eb2a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a1542910b40fa81ea1435c21c92bb2ea89dfdc4d --- /dev/null +++ b/data/2025/2506_12xxx/2506.12103/images/6eb562a9d279dd55a2a329e5996f1b4fa88c3aa97c915eefdfe3c4fad694eb2a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1772897611f3b7972233d4380d5eea7f46aedbfce7b7feea6ed718192bff44fc +size 11948 diff --git a/data/2025/2506_12xxx/2506.12103/images/744fe999753b87a0d695ae52fa9855a52df774d240e85120dba86e378b9958a8.jpg b/data/2025/2506_12xxx/2506.12103/images/744fe999753b87a0d695ae52fa9855a52df774d240e85120dba86e378b9958a8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d92d6fc8f653d9ff3ca82a159548aaed35b2ef9e --- /dev/null +++ b/data/2025/2506_12xxx/2506.12103/images/744fe999753b87a0d695ae52fa9855a52df774d240e85120dba86e378b9958a8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9396b23c194c5ef83e1a0aa74aa4773b1bd450e331dd285916dcea567cf9ad73 +size 109193 diff --git a/data/2025/2506_12xxx/2506.12103/images/76e139eb48ebbe67c6ae23af9d987841e615fe510fc47192d81ab31c91976ad0.jpg b/data/2025/2506_12xxx/2506.12103/images/76e139eb48ebbe67c6ae23af9d987841e615fe510fc47192d81ab31c91976ad0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d28b67919ace40c49fa6e84e7828272b33db86b4 --- /dev/null +++ b/data/2025/2506_12xxx/2506.12103/images/76e139eb48ebbe67c6ae23af9d987841e615fe510fc47192d81ab31c91976ad0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f7abffa5535e0498dac20ada69666df8c3329c9718c0599c14e20da665cf5c22 +size 14458 diff --git a/data/2025/2506_12xxx/2506.12103/images/797ec36ddec755043f50d9c4c3a8db04f6feb57495ebcfb57ee80e75e8722356.jpg 
b/data/2025/2506_12xxx/2506.12103/images/797ec36ddec755043f50d9c4c3a8db04f6feb57495ebcfb57ee80e75e8722356.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5f96d238241af50ebb30b01b56241ab33cf58c76 --- /dev/null +++ b/data/2025/2506_12xxx/2506.12103/images/797ec36ddec755043f50d9c4c3a8db04f6feb57495ebcfb57ee80e75e8722356.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6eb6fe3ff5c8945adad88da9084bdfb1d508ad68c52ec02e6e7450cd93af24d5 +size 11500 diff --git a/data/2025/2506_12xxx/2506.12103/images/7dad23d08aedb09972c224decfe86591bd5b6b4bfd30999e7dea4dd1df54b9ab.jpg b/data/2025/2506_12xxx/2506.12103/images/7dad23d08aedb09972c224decfe86591bd5b6b4bfd30999e7dea4dd1df54b9ab.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6f575839439476cc46223d94bd65845d0b8d3531 --- /dev/null +++ b/data/2025/2506_12xxx/2506.12103/images/7dad23d08aedb09972c224decfe86591bd5b6b4bfd30999e7dea4dd1df54b9ab.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eee34fca1699de9eddbea10b54fad4b8f3842b5fd4f43a8cf9e16c2719a1719d +size 74344 diff --git a/data/2025/2506_12xxx/2506.12103/images/8a0926b7ab13709954d88c169fa4b58825725dd4d4f17f64bb0c74906113cf3a.jpg b/data/2025/2506_12xxx/2506.12103/images/8a0926b7ab13709954d88c169fa4b58825725dd4d4f17f64bb0c74906113cf3a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..43dfeb9043d9ba0cae45a4e824bf757db41bb48c --- /dev/null +++ b/data/2025/2506_12xxx/2506.12103/images/8a0926b7ab13709954d88c169fa4b58825725dd4d4f17f64bb0c74906113cf3a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:68805d2e2a8e321dd8abce12b24b77ad368317740eb79b800999344ed41b0bdb +size 1027 diff --git a/data/2025/2506_12xxx/2506.12103/images/8be6bee72dd48d13efca48d766121833ca45d59e0535319fbd14117129e3befa.jpg b/data/2025/2506_12xxx/2506.12103/images/8be6bee72dd48d13efca48d766121833ca45d59e0535319fbd14117129e3befa.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3dbf46747f4c0325c5e30abd05d3f228a298bbab --- /dev/null +++ b/data/2025/2506_12xxx/2506.12103/images/8be6bee72dd48d13efca48d766121833ca45d59e0535319fbd14117129e3befa.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:436714c76d30e1c5e254af6af0488117263fd420cce72ea214ecf4e05d525ae5 +size 5942 diff --git a/data/2025/2506_12xxx/2506.12103/images/9adcd93c3a348a977378541a3eb9005b49c67785526792f626c35ec61a8e75a6.jpg b/data/2025/2506_12xxx/2506.12103/images/9adcd93c3a348a977378541a3eb9005b49c67785526792f626c35ec61a8e75a6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f7d29b4f94742d463312af6f45bb2fe930c28a6a --- /dev/null +++ b/data/2025/2506_12xxx/2506.12103/images/9adcd93c3a348a977378541a3eb9005b49c67785526792f626c35ec61a8e75a6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:923db7599b6a3d714fd5a1ddc7acb377ba9b13391e83d5c3ed66a5ab606de17d +size 14916 diff --git a/data/2025/2506_12xxx/2506.12103/images/a0f9f9877678b8c10c66531b68e095c59e1aa8c5547165a942d51f5903c8cfd2.jpg b/data/2025/2506_12xxx/2506.12103/images/a0f9f9877678b8c10c66531b68e095c59e1aa8c5547165a942d51f5903c8cfd2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..59294684e7a6035a22eeaed0e0bbd5dc244228f0 --- /dev/null +++ b/data/2025/2506_12xxx/2506.12103/images/a0f9f9877678b8c10c66531b68e095c59e1aa8c5547165a942d51f5903c8cfd2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:75531b31eeccd91b83262f2a3043f8057f86f3dafc01d60f1e33e65695eecc33 +size 9695 diff --git a/data/2025/2506_12xxx/2506.12103/images/a575838c54ef1139078534f10109c9a2c9a9db02f73565857edd5970bcc3d3d6.jpg b/data/2025/2506_12xxx/2506.12103/images/a575838c54ef1139078534f10109c9a2c9a9db02f73565857edd5970bcc3d3d6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f13a1c0041e87ee107afbaee7ce5954acdd1e2c6 --- /dev/null +++ b/data/2025/2506_12xxx/2506.12103/images/a575838c54ef1139078534f10109c9a2c9a9db02f73565857edd5970bcc3d3d6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7354d7d42e5c9e14bc14c126118acf835d48db68ba9580e5d5f16a77800229bb +size 16304 diff --git a/data/2025/2506_12xxx/2506.12103/images/a676fc75d5145c13756d1582514a2a8d24bb66faa47989ebc9fadd33faf862de.jpg b/data/2025/2506_12xxx/2506.12103/images/a676fc75d5145c13756d1582514a2a8d24bb66faa47989ebc9fadd33faf862de.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f5ad7757ce60b239a605c56ecace73971c8f781d --- /dev/null +++ b/data/2025/2506_12xxx/2506.12103/images/a676fc75d5145c13756d1582514a2a8d24bb66faa47989ebc9fadd33faf862de.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:72fc08080957eacc2ddbdcdcb060090ae31ad95fd874ae4d5a9489d6cf037653 +size 13641 diff --git a/data/2025/2506_12xxx/2506.12103/images/aa02909025c79a2e3bc10826fb45b862b4bcc1dd1005746e68c4bf07a0a94712.jpg b/data/2025/2506_12xxx/2506.12103/images/aa02909025c79a2e3bc10826fb45b862b4bcc1dd1005746e68c4bf07a0a94712.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b22a52326d64e61e737f988baef789de50935cf4 --- /dev/null +++ b/data/2025/2506_12xxx/2506.12103/images/aa02909025c79a2e3bc10826fb45b862b4bcc1dd1005746e68c4bf07a0a94712.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5f27722b35582e60d2898304456502cc19318f0ae314789ff733f6ff2c3c2b09 +size 1024 diff --git a/data/2025/2506_12xxx/2506.12103/images/ac91511bde96ff7f67aab4256d93ae2e68faf1e7ea3de954ee8f09249bf14491.jpg b/data/2025/2506_12xxx/2506.12103/images/ac91511bde96ff7f67aab4256d93ae2e68faf1e7ea3de954ee8f09249bf14491.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fea48e1b9de75ebc4703862de93aa6b73c7c4088 --- /dev/null +++ b/data/2025/2506_12xxx/2506.12103/images/ac91511bde96ff7f67aab4256d93ae2e68faf1e7ea3de954ee8f09249bf14491.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eb6ce1f74a6d506f0f857121163fa5f11b850937f0c3b586102625c997ad921c +size 1021 diff --git a/data/2025/2506_12xxx/2506.12103/images/ac9cea2d55627a6fb2b6475d2b322f234e90b3b7231e196768db56042240a551.jpg b/data/2025/2506_12xxx/2506.12103/images/ac9cea2d55627a6fb2b6475d2b322f234e90b3b7231e196768db56042240a551.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b663a6b69dc42f439a79dd87f2e2acc84848dce4 --- /dev/null +++ b/data/2025/2506_12xxx/2506.12103/images/ac9cea2d55627a6fb2b6475d2b322f234e90b3b7231e196768db56042240a551.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8fed5954a539d1acae88cbcf56897b8aa765350deb5c650602160f749a914671 +size 37649 diff --git a/data/2025/2506_12xxx/2506.12103/images/b32c411bb61018a8cbea165020cca783de3eec6926bb4ae55ba05b11185084fc.jpg b/data/2025/2506_12xxx/2506.12103/images/b32c411bb61018a8cbea165020cca783de3eec6926bb4ae55ba05b11185084fc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8734917ada9c46472b9268eb0d3208a2ebfaa9aa --- /dev/null +++ 
b/data/2025/2506_12xxx/2506.12103/images/b32c411bb61018a8cbea165020cca783de3eec6926bb4ae55ba05b11185084fc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2f2b3b705713a49652c8655e7deff6c2d6d5037168333cb2f0f6705adb041ce0 +size 50497 diff --git a/data/2025/2506_12xxx/2506.12103/images/b6eeeaf2872be27c90e24802e500673f2edd6c10373e7d61cccf86e50079d449.jpg b/data/2025/2506_12xxx/2506.12103/images/b6eeeaf2872be27c90e24802e500673f2edd6c10373e7d61cccf86e50079d449.jpg new file mode 100644 index 0000000000000000000000000000000000000000..939a2943a56320b4e441248f656a353a9c6fc859 --- /dev/null +++ b/data/2025/2506_12xxx/2506.12103/images/b6eeeaf2872be27c90e24802e500673f2edd6c10373e7d61cccf86e50079d449.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4566b1ec945b0472adf45a4ed6cf2ae3df367117586df7d78af1cf07948884b0 +size 9435 diff --git a/data/2025/2506_12xxx/2506.12103/images/b6fee84e3dec1c634c54caa6be6cd5718dbb5c0e2717596731f7713ff1f6cffc.jpg b/data/2025/2506_12xxx/2506.12103/images/b6fee84e3dec1c634c54caa6be6cd5718dbb5c0e2717596731f7713ff1f6cffc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..237de7f8d8350a628ecd5f2258e6848cb83c32a0 --- /dev/null +++ b/data/2025/2506_12xxx/2506.12103/images/b6fee84e3dec1c634c54caa6be6cd5718dbb5c0e2717596731f7713ff1f6cffc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:771c87a1c1cf3f49aa9eb69211008bcb2caf16954b251cfcbc407a75d2951698 +size 1018 diff --git a/data/2025/2506_12xxx/2506.12103/images/baa82e589e4f25edd6757b4df2d5b65509e507cf1609604b25a0fb5bbd27d127.jpg b/data/2025/2506_12xxx/2506.12103/images/baa82e589e4f25edd6757b4df2d5b65509e507cf1609604b25a0fb5bbd27d127.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d6783b069b8770d1a3f2149c47fde656943226ee --- /dev/null +++ b/data/2025/2506_12xxx/2506.12103/images/baa82e589e4f25edd6757b4df2d5b65509e507cf1609604b25a0fb5bbd27d127.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:942f3fc635720afacad69ec5f07b11933b81c270b8a7d0beb9e9116f3a9c377e +size 12340 diff --git a/data/2025/2506_12xxx/2506.12103/images/bd620d782bd91e54f3c9b6c828c79489b562e1ef53f3d5fefaf88840c93693bb.jpg b/data/2025/2506_12xxx/2506.12103/images/bd620d782bd91e54f3c9b6c828c79489b562e1ef53f3d5fefaf88840c93693bb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1a669f15d32dc20cb6257b5d7d5e55186d22f457 --- /dev/null +++ b/data/2025/2506_12xxx/2506.12103/images/bd620d782bd91e54f3c9b6c828c79489b562e1ef53f3d5fefaf88840c93693bb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:667fada77f89bbd5142f48a9d9bec9d20357a60885ac56fb0e1142463192b032 +size 11121 diff --git a/data/2025/2506_12xxx/2506.12103/images/c5cb0ef9a0a9732d58e0989d6f1191010a2c9f768a1103091663080bd660e849.jpg b/data/2025/2506_12xxx/2506.12103/images/c5cb0ef9a0a9732d58e0989d6f1191010a2c9f768a1103091663080bd660e849.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e77965419afa838df2b52e1215c03cbceecf8d8f --- /dev/null +++ b/data/2025/2506_12xxx/2506.12103/images/c5cb0ef9a0a9732d58e0989d6f1191010a2c9f768a1103091663080bd660e849.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:25943baeb8a43344cc15a192565671d13fad4fa3d45214aeade08ab6dbcc3a99 +size 10102 diff --git a/data/2025/2506_12xxx/2506.12103/images/ca75d134a16e3155d8778b1af440402be2f8402314bc6cd2b56a644be95182cb.jpg 
b/data/2025/2506_12xxx/2506.12103/images/ca75d134a16e3155d8778b1af440402be2f8402314bc6cd2b56a644be95182cb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e2e38e36840617f681ddfd8f07f83bdefc2f75d9 --- /dev/null +++ b/data/2025/2506_12xxx/2506.12103/images/ca75d134a16e3155d8778b1af440402be2f8402314bc6cd2b56a644be95182cb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:be0e552ffdfb83702ecf9a4b77e477533b8e193936f22f7a14d4412a715990b9 +size 1018 diff --git a/data/2025/2506_12xxx/2506.12103/images/d0909d37150b6e506c3322688439c181301d0021cd13c9fc9c174a56e6668fa5.jpg b/data/2025/2506_12xxx/2506.12103/images/d0909d37150b6e506c3322688439c181301d0021cd13c9fc9c174a56e6668fa5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f6dae040ea860f91d1c726eaa9ab69513085df0c --- /dev/null +++ b/data/2025/2506_12xxx/2506.12103/images/d0909d37150b6e506c3322688439c181301d0021cd13c9fc9c174a56e6668fa5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1da882d4e5d281c8653cec2e9b50987cd506111ccc20f861c3e2bcdd0ca90221 +size 1027 diff --git a/data/2025/2506_12xxx/2506.12103/images/df5d97728a52aeb8f9891aed812b4ce2cc7084518fcd2dfcb1ac34d32988bbd2.jpg b/data/2025/2506_12xxx/2506.12103/images/df5d97728a52aeb8f9891aed812b4ce2cc7084518fcd2dfcb1ac34d32988bbd2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8ad833b41d507646a5888cf64c02af42be565189 --- /dev/null +++ b/data/2025/2506_12xxx/2506.12103/images/df5d97728a52aeb8f9891aed812b4ce2cc7084518fcd2dfcb1ac34d32988bbd2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:61ee19078af290420f2f2b77491ae1d1a6ab1abc2f43a90e61c8f3e94374a355 +size 101947 diff --git a/data/2025/2506_12xxx/2506.12103/images/e2b9071e056e37ac4eb9d430bd3f923d778aeea4fa88dcd30d9470cf968999bb.jpg b/data/2025/2506_12xxx/2506.12103/images/e2b9071e056e37ac4eb9d430bd3f923d778aeea4fa88dcd30d9470cf968999bb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..72ce05baa33f2f2a07094036cc735cff45cc4a8e --- /dev/null +++ b/data/2025/2506_12xxx/2506.12103/images/e2b9071e056e37ac4eb9d430bd3f923d778aeea4fa88dcd30d9470cf968999bb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f65902305e95f2f9bf78ab2e3ed193c6df548e839a6ae1372179add64cd5ba13 +size 64479 diff --git a/data/2025/2506_12xxx/2506.12103/images/e537c2f1ffe8ae9930b85ad5babd789496b6b51b9653a1c9bf7e807992b5257a.jpg b/data/2025/2506_12xxx/2506.12103/images/e537c2f1ffe8ae9930b85ad5babd789496b6b51b9653a1c9bf7e807992b5257a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..87aab598f22e7a453e9e2a892643af9178957307 --- /dev/null +++ b/data/2025/2506_12xxx/2506.12103/images/e537c2f1ffe8ae9930b85ad5babd789496b6b51b9653a1c9bf7e807992b5257a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1c28d51f51cfb4e719114ce3d4b8cd3a02e6923762374501084e188f1cb750d6 +size 9014 diff --git a/data/2025/2506_12xxx/2506.12103/images/e774b73fa1e735c2e5327408e7138bc5a659ab582e779f6af3b14f94864a0daa.jpg b/data/2025/2506_12xxx/2506.12103/images/e774b73fa1e735c2e5327408e7138bc5a659ab582e779f6af3b14f94864a0daa.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3b3a756b7f5137075737ac9574bbfcdbed647111 --- /dev/null +++ b/data/2025/2506_12xxx/2506.12103/images/e774b73fa1e735c2e5327408e7138bc5a659ab582e779f6af3b14f94864a0daa.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:d928057b7e75cd3fd5773bb7d0f140a536ae2693deb7553af6b401696c7df7b2 +size 65176 diff --git a/data/2025/2506_12xxx/2506.12103/images/e89c885cecd0fdc170940dc1b4a7578c536bfa8bd858aa091078589ec47fa28a.jpg b/data/2025/2506_12xxx/2506.12103/images/e89c885cecd0fdc170940dc1b4a7578c536bfa8bd858aa091078589ec47fa28a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..aa94a14ac4ee0be4cd326fe149b861414eaac103 --- /dev/null +++ b/data/2025/2506_12xxx/2506.12103/images/e89c885cecd0fdc170940dc1b4a7578c536bfa8bd858aa091078589ec47fa28a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ae9bf75cfb4834689b393b469dd55e7d5b362aec239413ed2fac4861e8b116ae +size 19678 diff --git a/data/2025/2506_12xxx/2506.12103/images/f067e42572ed86ce39b690f5084a986840fe5c7a607cd71a992dbb2c6eca7bd7.jpg b/data/2025/2506_12xxx/2506.12103/images/f067e42572ed86ce39b690f5084a986840fe5c7a607cd71a992dbb2c6eca7bd7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d37a2dfe372de717a1821fd692e61f1bb2d62c79 --- /dev/null +++ b/data/2025/2506_12xxx/2506.12103/images/f067e42572ed86ce39b690f5084a986840fe5c7a607cd71a992dbb2c6eca7bd7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b065dfb3a8c2dcab37181313fd41360ea0e7967061fe95605284bb58c37af4a6 +size 11063 diff --git a/data/2025/2506_12xxx/2506.12103/images/f1232a213ee459c8d311a66e6cfa0b449d50930b4ecc8696660599b3d68a90b4.jpg b/data/2025/2506_12xxx/2506.12103/images/f1232a213ee459c8d311a66e6cfa0b449d50930b4ecc8696660599b3d68a90b4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..170ca5b0db46644e1e63f6e9a93f1af91680c226 --- /dev/null +++ b/data/2025/2506_12xxx/2506.12103/images/f1232a213ee459c8d311a66e6cfa0b449d50930b4ecc8696660599b3d68a90b4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7d08c99726b164fa4e64a00f8c440e1146be2c8ea851e3745f8cb34440edf0a7 +size 17643 diff --git a/data/2025/2506_12xxx/2506.12103/images/f62b7b8f0fa10516c22fcbbd7182f1adbd2b48c22b345d2038ebbd505fb4475e.jpg b/data/2025/2506_12xxx/2506.12103/images/f62b7b8f0fa10516c22fcbbd7182f1adbd2b48c22b345d2038ebbd505fb4475e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4df69e94eb6696018cd57e0900b3112dd8b65033 --- /dev/null +++ b/data/2025/2506_12xxx/2506.12103/images/f62b7b8f0fa10516c22fcbbd7182f1adbd2b48c22b345d2038ebbd505fb4475e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f9c142e6607cc1c28f9601b9f8241dd0a9e90fe1ae10f83622bdfe65c4120b9f +size 79670 diff --git a/data/2025/2506_12xxx/2506.12103/images/f66f29a61daabbd25d85f8a0f81690ced064d2fb9861e120cc24a4a32227f13c.jpg b/data/2025/2506_12xxx/2506.12103/images/f66f29a61daabbd25d85f8a0f81690ced064d2fb9861e120cc24a4a32227f13c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ad6a73afb3ff0b4d6cdfd00833b92581ba73ecc7 --- /dev/null +++ b/data/2025/2506_12xxx/2506.12103/images/f66f29a61daabbd25d85f8a0f81690ced064d2fb9861e120cc24a4a32227f13c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:38ab4ce3ba93eba2fa6cf845664e5c09b16429a86b3b8c0975452d6f22b07c51 +size 54352 diff --git a/data/2025/2506_12xxx/2506.12103/images/f7dc2c775b272d735f1b4017fbebd6ba3845352c29ef471fa5da43d37bbacaae.jpg b/data/2025/2506_12xxx/2506.12103/images/f7dc2c775b272d735f1b4017fbebd6ba3845352c29ef471fa5da43d37bbacaae.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7396c1ff30c69b058fe50a799c6c7701b2d02b38 --- /dev/null +++ 
b/data/2025/2506_12xxx/2506.12103/images/f7dc2c775b272d735f1b4017fbebd6ba3845352c29ef471fa5da43d37bbacaae.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c3d6044051a824a7ede476bca50ead8f345f22292503494a24b3211cb22556f2 +size 81216 diff --git a/data/2025/2506_12xxx/2506.12103/layout.json b/data/2025/2506_12xxx/2506.12103/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..f360b01d338f5f73ddb1d283ad375c7073eae556 --- /dev/null +++ b/data/2025/2506_12xxx/2506.12103/layout.json @@ -0,0 +1,51237 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 168, + 97, + 445, + 138 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 168, + 97, + 445, + 138 + ], + "spans": [ + { + "bbox": [ + 168, + 97, + 445, + 138 + ], + "type": "text", + "content": "The Amazon Nova Family of Models: Technical Report and Model Card" + } + ] + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 69, + 220, + 286, + 462 + ], + "blocks": [ + { + "bbox": [ + 69, + 220, + 286, + 462 + ], + "lines": [ + { + "bbox": [ + 69, + 220, + 286, + 462 + ], + "spans": [ + { + "bbox": [ + 69, + 220, + 286, + 462 + ], + "type": "image", + "image_path": "3156a9077f1c972bfe8d4f5736cc7cb801a543c0a7e1872ae7041bb75bf072ce.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 210, + 472, + 400, + 484 + ], + "lines": [ + { + "bbox": [ + 210, + 472, + 400, + 484 + ], + "spans": [ + { + "bbox": [ + 210, + 472, + 400, + 484 + ], + "type": "text", + "content": "Figure 1: The Amazon Nova family of models" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 326, + 274, + 541, + 340 + ], + "blocks": [ + { + "bbox": [ + 221, + 178, + 389, + 191 + ], + "lines": [ + { + "bbox": [ + 221, + 178, + 389, + 191 + ], + "spans": [ + { + "bbox": [ + 221, + 178, + 389, + 191 + ], + "type": "text", + "content": "Amazon Artificial General Intelligence" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 326, + 274, + 541, + 340 + ], + "lines": [ + { + "bbox": [ + 326, + 274, + 541, + 340 + ], + "spans": [ + { + "bbox": [ + 326, + 274, + 541, + 340 + ], + "type": "image", + "image_path": "1888da7922d07148b8348987b53dfc4837e982a06f12992d09585bdf6e01d4e8.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 326, + 395, + 541, + 462 + ], + "blocks": [ + { + "bbox": [ + 326, + 395, + 541, + 462 + ], + "lines": [ + { + "bbox": [ + 326, + 395, + 541, + 462 + ], + "spans": [ + { + "bbox": [ + 326, + 395, + 541, + 462 + ], + "type": "image", + "image_path": "6eb562a9d279dd55a2a329e5996f1b4fa88c3aa97c915eefdfe3c4fad694eb2a.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 281, + 525, + 330, + 537 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 281, + 525, + 330, + 537 + ], + "spans": [ + { + "bbox": [ + 281, + 525, + 330, + 537 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 549, + 506, + 671 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 549, + 506, + 671 + ], + "spans": [ + { + "bbox": [ + 104, + 549, + 506, + 671 + ], + "type": "text", + "content": "We present Amazon Nova, a new generation of state-of-the-art foundation models that deliver frontier intelligence 
and industry-leading price performance. Amazon Nova Pro is a highly-capable multimodal model with the best combination of accuracy, speed, and cost for a wide range of tasks. Amazon Nova Lite is a low-cost multimodal model that is lightning fast for processing images, video, documents and text. Amazon Nova Micro is a text-only model that delivers our lowest-latency responses at very low cost. Amazon Nova Canvas is an image generation model that creates professional grade images with rich customization controls. Amazon Nova Reel is a video generation model offering high-quality outputs, customization, and motion control. Our models were built responsibly and with a commitment to customer trust, security, and reliability. We report benchmarking results for core capabilities, agentic performance, long context, functional adaptation, runtime performance, and human evaluation." + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 212, + 36, + 560 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 212, + 36, + 560 + ], + "spans": [ + { + "bbox": [ + 14, + 212, + 36, + 560 + ], + "type": "text", + "content": "arXiv:2506.12103v1 [cs.AI] 17 Mar 2025" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 71, + 119, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 71, + 119, + 84 + ], + "spans": [ + { + "bbox": [ + 69, + 71, + 119, + 84 + ], + "type": "text", + "content": "Contents" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 70, + 93, + 541, + 104 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 93, + 541, + 104 + ], + "spans": [ + { + "bbox": [ + 70, + 93, + 541, + 104 + ], + "type": "text", + "content": "1 Introduction 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 86, + 105, + 541, + 125 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 86, + 105, + 541, + 114 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 105, + 541, + 114 + ], + "spans": [ + { + "bbox": [ + 86, + 105, + 541, + 114 + ], + "type": "text", + "content": "1.1 Amazon Nova Pro, Lite, and Micro 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 86, + 116, + 541, + 125 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 116, + 541, + 125 + ], + "spans": [ + { + "bbox": [ + 86, + 116, + 541, + 125 + ], + "type": "text", + "content": "1.2 Amazon Nova Canvas and Reel 3" + } + ] + } + ], + "index": 4 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 69, + 136, + 541, + 147 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 136, + 541, + 147 + ], + "spans": [ + { + "bbox": [ + 69, + 136, + 541, + 147 + ], + "type": "text", + "content": "2 Amazon Nova Pro, Lite, and Micro Evaluations 5" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 85, + 148, + 541, + 157 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 148, + 541, + 157 + ], + "spans": [ + { + "bbox": [ + 85, + 148, + 541, + 157 + ], + "type": "text", + "content": "2.1 Core capability public benchmarks 5" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 159, + 541, + 179 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 107, + 159, + 541, + 168 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 159, + 541, + 168 + ], + "spans": [ + { + "bbox": [ + 107, + 
159, + 541, + 168 + ], + "type": "text", + "content": "2.1.1 Core capability text benchmarks and results 5" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 169, + 541, + 179 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 169, + 541, + 179 + ], + "spans": [ + { + "bbox": [ + 107, + 169, + 541, + 179 + ], + "type": "text", + "content": "2.1.2 Core capability multimodal benchmarks and results 7" + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 85, + 180, + 541, + 190 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 180, + 541, + 190 + ], + "spans": [ + { + "bbox": [ + 85, + 180, + 541, + 190 + ], + "type": "text", + "content": "2.2 Agentic workflows 8" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 107, + 191, + 541, + 213 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 108, + 191, + 541, + 201 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 191, + 541, + 201 + ], + "spans": [ + { + "bbox": [ + 108, + 191, + 541, + 201 + ], + "type": "text", + "content": "2.2.1 Agentic text benchmarks and results 9" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 107, + 202, + 541, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 202, + 541, + 213 + ], + "spans": [ + { + "bbox": [ + 107, + 202, + 541, + 213 + ], + "type": "text", + "content": "2.2.2 Agentic multimodal benchmarks and results 9" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 85, + 213, + 541, + 223 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 213, + 541, + 223 + ], + "spans": [ + { + "bbox": [ + 85, + 213, + 541, + 223 + ], + "type": "text", + "content": "2.3 Long context 10" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 85, + 224, + 541, + 235 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 224, + 541, + 235 + ], + "spans": [ + { + "bbox": [ + 85, + 224, + 541, + 235 + ], + "type": "text", + "content": "2.4 Functional expertise 11" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 107, + 236, + 541, + 267 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 108, + 236, + 541, + 245 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 236, + 541, + 245 + ], + "spans": [ + { + "bbox": [ + 108, + 236, + 541, + 245 + ], + "type": "text", + "content": "2.4.1 Software engineering 12" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 107, + 246, + 541, + 256 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 246, + 541, + 256 + ], + "spans": [ + { + "bbox": [ + 107, + 246, + 541, + 256 + ], + "type": "text", + "content": "2.4.2 Financial analysis 12" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 107, + 257, + 541, + 267 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 257, + 541, + 267 + ], + "spans": [ + { + "bbox": [ + 107, + 257, + 541, + 267 + ], + "type": "text", + "content": "2.4.3 Retrieval augmented generation 12" + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 85, + 268, + 541, + 278 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 268, + 541, + 278 + ], + "spans": [ + { + "bbox": [ + 85, + 268, + 541, + 278 + ], + "type": "text", + "content": "2.5 Runtime performance 13" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 70, + 288, + 541, + 298 + ], + "type": 
"title", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 288, + 541, + 298 + ], + "spans": [ + { + "bbox": [ + 70, + 288, + 541, + 298 + ], + "type": "text", + "content": "3 Amazon Nova Canvas Evaluation 15" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 85, + 300, + 541, + 320 + ], + "type": "list", + "angle": 0, + "index": 25, + "blocks": [ + { + "bbox": [ + 85, + 300, + 541, + 309 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 300, + 541, + 309 + ], + "spans": [ + { + "bbox": [ + 85, + 300, + 541, + 309 + ], + "type": "text", + "content": "3.1 Automated metrics 15" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 85, + 310, + 541, + 320 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 310, + 541, + 320 + ], + "spans": [ + { + "bbox": [ + 85, + 310, + 541, + 320 + ], + "type": "text", + "content": "3.2 Human evaluation 15" + } + ] + } + ], + "index": 24 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 70, + 331, + 541, + 341 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 331, + 541, + 341 + ], + "spans": [ + { + "bbox": [ + 70, + 331, + 541, + 341 + ], + "type": "text", + "content": "4 Amazon Nova Reel Evaluation 16" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 85, + 342, + 541, + 374 + ], + "type": "list", + "angle": 0, + "index": 30, + "blocks": [ + { + "bbox": [ + 85, + 342, + 541, + 352 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 342, + 541, + 352 + ], + "spans": [ + { + "bbox": [ + 85, + 342, + 541, + 352 + ], + "type": "text", + "content": "4.1 Human evaluation metrics 16" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 85, + 353, + 541, + 363 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 353, + 541, + 363 + ], + "spans": [ + { + "bbox": [ + 85, + 353, + 541, + 363 + ], + "type": "text", + "content": "4.2 Dataset 16" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 85, + 364, + 541, + 374 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 364, + 541, + 374 + ], + "spans": [ + { + "bbox": [ + 85, + 364, + 541, + 374 + ], + "type": "text", + "content": "4.3 Implementation details & results 17" + } + ] + } + ], + "index": 29 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 70, + 384, + 541, + 395 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 384, + 541, + 395 + ], + "spans": [ + { + "bbox": [ + 70, + 384, + 541, + 395 + ], + "type": "text", + "content": "5 Responsible AI 17" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 85, + 396, + 541, + 439 + ], + "type": "list", + "angle": 0, + "index": 36, + "blocks": [ + { + "bbox": [ + 85, + 396, + 541, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 396, + 541, + 407 + ], + "spans": [ + { + "bbox": [ + 85, + 396, + 541, + 407 + ], + "type": "text", + "content": "5.1 Defining our RAI objectives 17" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 85, + 407, + 541, + 417 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 407, + 541, + 417 + ], + "spans": [ + { + "bbox": [ + 85, + 407, + 541, + 417 + ], + "type": "text", + "content": "5.2 Ensuring adherence to RAI objectives 18" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 85, + 418, + 541, + 427 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 418, + 541, + 427 + ], + "spans": [ + { + "bbox": [ + 85, + 418, + 541, + 427 + ], + "type": "text", + "content": "5.3 RAI 
Evaluation 19" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 85, + 429, + 541, + 439 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 429, + 541, + 439 + ], + "spans": [ + { + "bbox": [ + 85, + 429, + 541, + 439 + ], + "type": "text", + "content": "5.4 Red Teaming 19" + } + ] + } + ], + "index": 35 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 107, + 440, + 541, + 472 + ], + "type": "list", + "angle": 0, + "index": 40, + "blocks": [ + { + "bbox": [ + 108, + 440, + 541, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 440, + 541, + 449 + ], + "spans": [ + { + "bbox": [ + 108, + 440, + 541, + 449 + ], + "type": "text", + "content": "5.4.1 Internal Red Teaming 19" + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 108, + 450, + 541, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 450, + 541, + 460 + ], + "spans": [ + { + "bbox": [ + 108, + 450, + 541, + 460 + ], + "type": "text", + "content": "5.4.2 External Red Teaming 20" + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 107, + 461, + 541, + 472 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 461, + 541, + 472 + ], + "spans": [ + { + "bbox": [ + 107, + 461, + 541, + 472 + ], + "type": "text", + "content": "5.4.3 Automated Red Teaming 21" + } + ] + } + ], + "index": 39 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 70, + 481, + 541, + 493 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 481, + 541, + 493 + ], + "spans": [ + { + "bbox": [ + 70, + 481, + 541, + 493 + ], + "type": "text", + "content": "6 Training Infrastructure 21" + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 70, + 502, + 541, + 576 + ], + "type": "list", + "angle": 0, + "index": 46, + "blocks": [ + { + "bbox": [ + 70, + 502, + 541, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 502, + 541, + 514 + ], + "spans": [ + { + "bbox": [ + 70, + 502, + 541, + 514 + ], + "type": "text", + "content": "A Amazon Nova Canvas Capabilities 28" + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 70, + 522, + 541, + 535 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 522, + 541, + 535 + ], + "spans": [ + { + "bbox": [ + 70, + 522, + 541, + 535 + ], + "type": "text", + "content": "B Prompts and Scoring 30" + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 70, + 543, + 541, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 543, + 541, + 555 + ], + "spans": [ + { + "bbox": [ + 70, + 543, + 541, + 555 + ], + "type": "text", + "content": "C Qualitative examples of multimodal intelligence 39" + } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 70, + 564, + 541, + 576 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 564, + 541, + 576 + ], + "spans": [ + { + "bbox": [ + 70, + 564, + 541, + 576 + ], + "type": "text", + "content": "D Correspondence and Contributors 43" + } + ] + } + ], + "index": 45 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "spans": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "text", + "content": "The Amazon Nova Family of Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 741, + 307, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 307, + 750 + ], + 
"spans": [ + { + "bbox": [ + 300, + 741, + 307, + 750 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 47 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 71, + 155, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 71, + 155, + 83 + ], + "spans": [ + { + "bbox": [ + 69, + 71, + 155, + 83 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 97, + 541, + 120 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 97, + 541, + 120 + ], + "spans": [ + { + "bbox": [ + 67, + 97, + 541, + 120 + ], + "type": "text", + "content": "This document introduces Amazon Nova, a new generation of state-of-the-art foundation models that deliver frontier intelligence and industry-leading price performance." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 134, + 244, + 146 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 134, + 244, + 146 + ], + "spans": [ + { + "bbox": [ + 69, + 134, + 244, + 146 + ], + "type": "text", + "content": "1.1 Amazon Nova Pro, Lite, and Micro" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 156, + 326, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 156, + 326, + 167 + ], + "spans": [ + { + "bbox": [ + 69, + 156, + 326, + 167 + ], + "type": "text", + "content": "Key capabilities of Amazon Nova Pro, Lite, and Micro include:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 96, + 178, + 541, + 422 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 96, + 178, + 541, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 178, + 541, + 277 + ], + "spans": [ + { + "bbox": [ + 96, + 178, + 541, + 277 + ], + "type": "text", + "content": "- Frontier intelligence: Amazon Nova models possess frontier intelligence, enabling them to understand and process complex language tasks with state-of-the-art accuracy. Amazon Nova Micro sets new standards in its intelligence tier in several text benchmarks such as Language Understanding (MMLU), Deep Reasoning (GPQA), Mathematics (MATH), and Multi-step Reasoning (Big-Bench Hard). Our multimodal models, Amazon Nova Pro and Lite, take text, images, documents, and video as input and generate text as output. These models set standards in several benchmarks such as Video Captioning (VATEX), Visual QA (TextVQA), Function Calling (BFCL), and multimodal agentic benchmarks (GroundUI-1K, VisualWebBench, Mind2Web) in their respective intelligence tiers. These models are the first to offer video understanding capabilities on Amazon Bedrock, enabling deeper insights from multimedia content." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 96, + 283, + 539, + 305 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 283, + 539, + 305 + ], + "spans": [ + { + "bbox": [ + 96, + 283, + 539, + 305 + ], + "type": "text", + "content": "- Speed: Amazon Nova has been designed for fast inference, with Amazon Micro, Lite, and Pro each being one of the fastest models in their respective intelligence tiers." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 96, + 311, + 539, + 355 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 311, + 539, + 355 + ], + "spans": [ + { + "bbox": [ + 96, + 311, + 539, + 355 + ], + "type": "text", + "content": "- Agentic Workflows: Amazon Nova Pro, Lite, and Micro can power AI agents capable of breaking down and executing multi-step tasks. These models are integrated with Bedrock Knowledge Bases and they excel at retrieval-augmented generation (RAG) to ensure the best accuracy by grounding their responses to the developer's data." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 96, + 361, + 541, + 395 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 361, + 541, + 395 + ], + "spans": [ + { + "bbox": [ + 96, + 361, + 541, + 395 + ], + "type": "text", + "content": "- Customizability: Developers can fine-tune these models with multimodal data (Pro and Lite) or text data (Pro, Lite, and Micro), providing the flexibility to achieve desired accuracy, latency, and cost. Developers can also run self-service Custom Fine-Tuning (CFT) and distillation of larger models to smaller ones via Bedrock APIs." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 96, + 399, + 539, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 399, + 539, + 422 + ], + "spans": [ + { + "bbox": [ + 96, + 399, + 539, + 422 + ], + "type": "text", + "content": "- Price-Performance: Each model was optimized to deliver exceptional price-performance value, offering state-of-the-art performance on key benchmarks at low cost." + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 67, + 434, + 541, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 434, + 541, + 544 + ], + "spans": [ + { + "bbox": [ + 67, + 434, + 541, + 544 + ], + "type": "text", + "content": "Amazon Nova Pro, Lite, and Micro are based on the Transformer architecture [74]. Each model went through a series of training processes that began with pretraining using a mixture of large amounts of multilingual and multimodal data. Our models were trained on data from a variety of sources, including licensed data, proprietary data, open source datasets, and publicly available data where appropriate. We curated data from over 200 languages, with particular emphasis on Arabic, Dutch, English, French, German, Hebrew, Hindi, Italian, Japanese, Korean, Portuguese, Russian, Simplified Chinese, Spanish, and Turkish. After pretraining, models iteratively went through a series of fine-tuning stages, including Supervised Fine-Tuning (SFT) on instruction-demonstration pairs (including multimodal ones) and reward model (RM) training from human preference data [59]. Finally, the models learned from human preferences via methods like Direct Preference Optimization (DPO) [62] and Proximal Policy Optimization (PPO) [68] to ensure that the final models are aligned with human preferences in both quality and responsibility." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 69, + 558, + 229, + 569 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 558, + 229, + 569 + ], + "spans": [ + { + "bbox": [ + 69, + 558, + 229, + 569 + ], + "type": "text", + "content": "1.2 Amazon Nova Canvas and Reel" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 67, + 579, + 541, + 602 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 579, + 541, + 602 + ], + "spans": [ + { + "bbox": [ + 67, + 579, + 541, + 602 + ], + "type": "text", + "content": "Amazon Nova Canvas and Amazon Nova Reel are designed to create realistic multimodal content, including images and videos, for a wide range of applications such as advertising, marketing, and entertainment." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 68, + 606, + 477, + 618 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 606, + 477, + 618 + ], + "spans": [ + { + "bbox": [ + 68, + 606, + 477, + 618 + ], + "type": "text", + "content": "Amazon Nova Canvas offers the following functionalities, with more details provided in Appendix A:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 96, + 628, + 539, + 721 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 96, + 628, + 539, + 671 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 628, + 539, + 671 + ], + "spans": [ + { + "bbox": [ + 96, + 628, + 539, + 671 + ], + "type": "text", + "content": "- Text-to-image generation: Amazon Nova Canvas can generate images with various resolutions (from 512 up to 2K horizontal resolution) and aspect ratios (any aspect ratio between 1:4 and 4:1 with a maximum of 4.2M pixels). Customers can provide reference images to guide the model to generate outputs in a specific style or color palette, or to generate variations of an image." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 96, + 677, + 539, + 721 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 677, + 539, + 721 + ], + "spans": [ + { + "bbox": [ + 96, + 677, + 539, + 721 + ], + "type": "text", + "content": "- Image editing: Amazon Nova Canvas allows precise image editing operations like inpainting and outpainting through natural language mask prompts. These mask prompts describe the specific area of the input image that needs to be repaired. The user can also easily change a background with the background removal feature leaving the subject of the image unchanged." 
+ } + ] + } + ], + "index": 16 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "spans": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "text", + "content": "The Amazon Nova Family of Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 741, + 307, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 307, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 307, + 750 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 72, + 294, + 84 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 72, + 294, + 84 + ], + "spans": [ + { + "bbox": [ + 69, + 72, + 294, + 84 + ], + "type": "text", + "content": "Amazon Nova Reel offers the following functionalities:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 96, + 93, + 539, + 188 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 96, + 93, + 538, + 114 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 93, + 538, + 114 + ], + "spans": [ + { + "bbox": [ + 96, + 93, + 538, + 114 + ], + "type": "text", + "content": "- Generate videos from a text prompt: Amazon Nova Reel can generate high-quality videos of 6-second duration (720p resolution at 24 frames per second) from a text prompt." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 96, + 118, + 539, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 118, + 539, + 140 + ], + "spans": [ + { + "bbox": [ + 96, + 118, + 539, + 140 + ], + "type": "text", + "content": "- Generate videos from a reference image and a prompt: Amazon Nova Reel brings images to motion and generates videos that are guided by the input image and a text prompt." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 96, + 144, + 539, + 188 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 144, + 539, + 188 + ], + "spans": [ + { + "bbox": [ + 96, + 144, + 539, + 188 + ], + "type": "text", + "content": "- Camera motion control using a text prompt: With camera motion control in Amazon Nova Reel, the user can guide camera motion with text prompts like \"zoom\" and \"dolly forward\" to get the exact visual needed for each video. Amazon Nova Reel supports more than 20 camera motions. For more details, please refer to our prompting guide1." + } + ] + } + ], + "index": 4 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 68, + 198, + 541, + 297 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 198, + 541, + 297 + ], + "spans": [ + { + "bbox": [ + 68, + 198, + 541, + 297 + ], + "type": "text", + "content": "Amazon Nova Canvas and Reel are latent diffusion models [61] where a Variational AutoEncoder (VAE) [41] maps the image or video frames to latent variables on which the diffusion process happens. A text encoder tokenizes input text prompts into tokens which are then passed to the diffusion model as a conditioning signal. At inference time, a latent variable is initialized with random noise sampled from a Gaussian distribution, which is then denoised by the trained diffusion model iteratively into a clean latent variable. The clean latent variable is decoded back to images or video frames by the decoder of the VAE. 
Both models underwent a two-phased approach of pretraining and fine-tuning. Pretraining data were sourced from a variety of sources, including licensed data, proprietary data, open source datasets, and publicly available data where appropriate. Our highly scalable data filtering, deduplication, and enrichment pipelines were based on AWS EMR [2] and AWS Batch [1], as well as other AWS services." + } ] } ], "index": 6 + } ], "discarded_blocks": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "spans": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "text", + "content": "The Amazon Nova Family of Models" + } ] + } ], + "index": 0 + }, + { + "bbox": [ + 83, + 710, + 319, + 722 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 710, + 319, + 722 + ], + "spans": [ + { + "bbox": [ + 83, + 710, + 319, + 722 + ], + "type": "text", + "content": "Model | tok/sec | MMLU (accuracy) | ARC-C (accuracy) | DROP (F1-score) | GPQA (accuracy) | MATH (accuracy) | GSM8k (accuracy) | IFEval (instruction-level loose accuracy) | BBH (accuracy)
Nova Pro | 100 | 85.9 | 94.8±1.3 | 85.4±0.7 | 46.9±4.6 | 76.6±1.2 | 94.8±1.2 | 92.1±1.8 | 86.9
Nova Lite | 157 | 80.5 | 92.4±1.5 | 80.2±0.8 | 42.0±4.6 | 73.3±1.2 | 94.5±1.2 | 89.7±2.1 | 82.4
Nova Micro | 210 | 77.6 | 90.2±1.7 | 79.3±0.8 | 40.0±4.5 | 69.3±1.3 | 92.3±1.4 | 87.2±2.3 | 79.5
(shot settings) | | 0-shot CoT | 0-shot | 6-shot CoT | 0-shot CoT | 0-shot CoT | 0-shot CoT | 0-shot | 3-shot CoT
Claude 3.5 Sonnet (Oct) | 57 | 89.3 | 96.3M±1.1 | 88.3±0.6 | 58.0M±4.6 | 78.3±1.1 | 96.5M±1.0 | 90.2*±2.0 | 93.2
Claude 3.5 Haiku | 64 | 80.3 | 90.9M±1.6 | 83.1±0.8 | 37.5M±4.5 | 69.4±1.3 | 93.8M±1.3 | 85.9*±2.4 | 86.6
(shot settings) | | 0-shot CoT | 25-shot | 3-shot | 0-shot CoT | 0-shot CoT | 0-shot CoT | 0-shot | 3-shot CoT
Gemini 1.5 Pro (002) | 58 | 85.9 | 95.4M±1.2 | 74.9±0.9 | 55.1M±4.6 | 86.5±0.9 | 90.8±1.6 | 91.7M±1.9 | 89.2
Gemini 1.5 Flash (002) | 190 | 78.9 | 94.3M±1.3 | 78.4±0.8 | 45.1M±4.6 | 77.9±1.2 | 86.2±1.9 | 91.6M±1.9 | 85.5
Gemini 1.5 Flash 8B (001) | 283 | 68.1 | 88.7M±1.8 | 68.1M±0.9 | 33.5M±4.4 | 58.7±1.4 | 84.5M±2.0 | 86.1M±2.3 | 69.5
(shot settings) | | 5-shot | 25-shot | 3-shot | 0-shot | 4-shot | 11-shot | 0-shot | 3-shot
GPT-4o | 163 | 88.7 | 96.2M±1.1 | 83.4±0.7 | 48.4M±4.6 | 76.6±1.2 | 92.6M±1.4 | 89.8M±2.1 | 83.0M
GPT-4o Mini | 113 | 82.0 | 92.3M±1.5 | 79.7±0.8 | 41.7M±4.6 | 70.2±1.3 | 86.4M±1.8 | 87.4M±2.3 | 81.0M
(shot settings) | | 0-shot | 25-shot | 3-shot | 0-shot CoT | 0-shot CoT | 0-shot CoT | 0-shot | 3-shot
Llama 3.2 90B | 40 | 86.0 | 94.8±1.3 | - | 46.7±4.6 | 68.0±1.3 | 95.1±1.2 | 90.9M±2.0 | -
Llama 3.2 11B | 124 | 73.0 | 83.4±2.1 | - | 32.8±4.3 | 51.9±1.4 | 84.5±2.0 | 85.0M±2.4 | -
Llama 3.1 8B | 157 | 73.0 | 83.4±2.1 | - | 30.4±4.3 | 51.9±1.4 | 84.5±2.0 | 85.0M±2.4 | -
(shot settings) | | 0-shot CoT | 25-shot | - | 0-shot CoT | 0-shot CoT | 8-shot CoT | - | -", + "image_path": "13929e9d9dc2fc9d6064a3e199a3df08286691b00bd5e5e3a8118591ab3ce293.jpg" + } ] + } ], + "index": 1, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 68, + 604, + 541, + 681 + ], + "lines": [ + { + "bbox": [ + 68, + 604, + 541, + 681 + ], + "spans": [ + { + "bbox": [ + 68, + 604, + 541, + 681 + ], + "type": "text", + "content": "Table 1: Quantitative results on core capability benchmarks (MMLU [36], ARC-C [22], DROP [26], GPQA [64], MATH [37], GSM8K [23], IFEval [89] and BigBench-Hard (BBH) [72]). Unless otherwise noted, all reference numbers are taken from the original technical reports and websites for Claude models [14, 11], GPT4 models [58, 57], Llama models [45] and Gemini models [32]. Results marked with " + }, + { + "bbox": [ + 68, + 604, + 541, + 681 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 68, + 604, + 541, + 681 + ], + "type": "text", + "content": " were measured by " + }, + { + "bbox": [ + 68, + 604, + 541, + 681 + ], + "type": "inline_equation", + "content": "\\mathrm{us}^2" + }, + { + "bbox": [ + 68, + 604, + 541, + 681 + ], + "type": "text", + "content": ". 
Claude numbers for IFEval (taken from [14]) are marked with an asterisk (*), as the scoring methodology is unspecified in the report. Token generation speed in tokens per second (tok/sec), the inverse of per-token generation latency, is reproduced from Section 2.5." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 1 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "spans": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "text", + "content": "The Amazon Nova Family of Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 741, + 307, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 307, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 307, + 750 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 3 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 72, + 543, + 128 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 543, + 128 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 543, + 128 + ], + "type": "text", + "content": "Table 1 summarizes the quantitative results of Nova models and select public models on the aforementioned benchmarks for core capabilities. When available, we reference the highest publicly-reported numbers for each benchmark from the official technical reports and websites for Claude, Gemini, OpenAI and Llama family of models. Amazon Nova Pro, Lite, and Micro demonstrate strong performance across all benchmarks, showcasing their advanced core intelligence, particularly Amazon Nova Micro and Lite on math, reasoning, and instruction following benchmarks." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 132, + 541, + 220 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 132, + 541, + 220 + ], + "spans": [ + { + "bbox": [ + 67, + 132, + 541, + 220 + ], + "type": "text", + "content": "We also evaluate the translation capabilities of Nova models. Flores200 [73, 34, 35], or simply Flores, is a machine translation benchmark consisting of translations from 842 distinct web articles, which tests the translation capabilities between English and non-English languages. Sentences are 21 words long on average. We use a 0-shot setup and report the macro average of two metrics, spBleu and COMET22 score [63] across a set of languages (Arabic, German, Spanish, French, Hindi, Italian, Japanese, Korean, Portuguese, Hebrew, Turkish, Simplified Chinese, Russian, Dutch) for translation from and into English. The prompts used for evaluation are summarized in Appendix B.1. Table 2 summarizes our quantitative results on Flores, demonstrating strong multilingual performance on translation for Amazon Nova Pro, Lite, and Micro." + } + ] + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 93, + 228, + 519, + 472 + ], + "blocks": [ + { + "bbox": [ + 93, + 228, + 519, + 472 + ], + "lines": [ + { + "bbox": [ + 93, + 228, + 519, + 472 + ], + "spans": [ + { + "bbox": [ + 93, + 228, + 519, + 472 + ], + "type": "table", + "html": "
FLORES (0-shot)
 | | en → Set1 | Set1 → en
 | tok/sec | spBleu (↑) | COMET22 (↑) | spBleu (↑) | COMET22 (↑)
Nova Pro | 100 | 43.4 | 89.1 | 44.4 | 89.0
Nova Lite | 157 | 41.5 | 88.8 | 43.1 | 88.8
Nova Micro | 210 | 40.2 | 88.5 | 42.6 | 88.7
Claude 3.5 Sonnet (Oct) | 57 | 42.5M | 89.4M | 43.5M | 89.1M
Claude 3.5 Haiku | 64 | 40.0M | 88.5M | 40.2M | 88.3M
Gemini 1.5 Pro (002) | 57 | 43.0M* | 89.1M* | 45.6M* | 89.1M*
Gemini 1.5 Flash (002) | 190 | 40.0M* | 88.5M* | 42.9M* | 88.8M*
Gemini 1.5 Flash 8B (001) | 283 | 38.2M* | 88.0M* | 41.4M* | 88.5M*
GPT-4o | 163 | 43.1M* | 89.2M* | 43.9M* | 89.0M*
GPT-4o Mini | 113 | 41.1M* | 88.7M* | 41.9M* | 88.7M*
Llama 3.2 90B | 40 | 39.7M | 88.2M | 43.7M | 88.5M
Llama 3.2 11B | 124 | 33.0M | 85.7M | 36.3M | 86.3M
Llama 3.1 8B | 157 | 32.7M | 85.5M | 36.5M | 86.5M
", + "image_path": "df5d97728a52aeb8f9891aed812b4ce2cc7084518fcd2dfcb1ac34d32988bbd2.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 475, + 544, + 521 + ], + "lines": [ + { + "bbox": [ + 67, + 475, + 544, + 521 + ], + "spans": [ + { + "bbox": [ + 67, + 475, + 544, + 521 + ], + "type": "text", + "content": "Table 2: Quantitative results on Flores200 [34], a machine translation benchmark. Set1 refers to {de, es, fr, it, pt, ja, ar, hi, ru, nl, tr, he, ko, zh}. Results marked with " + }, + { + "bbox": [ + 67, + 475, + 544, + 521 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 67, + 475, + 544, + 521 + ], + "type": "text", + "content": " were measured by us. Results marked with an asterisk (*) were obtained using an alternate prompt which can be found in Appendix B.1 Token generation speed in tokens per second (tok/sec), the inverse of per-token generation latency, is reproduced from Section 2.5." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 67, + 545, + 324, + 558 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 545, + 324, + 558 + ], + "spans": [ + { + "bbox": [ + 67, + 545, + 324, + 558 + ], + "type": "text", + "content": "2.1.2 Core capability multimodal benchmarks and results" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 564, + 543, + 619 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 564, + 543, + 619 + ], + "spans": [ + { + "bbox": [ + 67, + 564, + 543, + 619 + ], + "type": "text", + "content": "In this section we evaluate the multimodal capabilities of Amazon Nova models on a diverse set of public benchmarks. Our selection of multimodal benchmarks aims to probe for various capabilities, including natural image understanding, document understanding with charts and graphs, text understanding, and temporal reasoning in videos. For all benchmarks, we follow the suggested metrics and choice of data split for evaluation. The following list briefly describes the selected benchmarks." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 96, + 627, + 541, + 723 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 96, + 627, + 541, + 661 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 627, + 541, + 661 + ], + "spans": [ + { + "bbox": [ + 96, + 627, + 541, + 661 + ], + "type": "text", + "content": "- MMMU [85]: The Massive Multi-discipline Multimodal Understanding benchmark consists of college-level multiple-choice and open-ended questions from 30 different disciplines. We use Chain-of-Thought (CoT) prompting for this benchmark and report accuracy." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 96, + 663, + 541, + 697 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 663, + 541, + 697 + ], + "spans": [ + { + "bbox": [ + 96, + 663, + 541, + 697 + ], + "type": "text", + "content": "ChartQA [50]: The 2,500 questions of this benchmark cover three different types of charts (bar, line and pie) and require strong visual, logical, and arithmetical reasoning capabilities. We evaluate on the test set and report relaxed accuracy." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 96, + 700, + 541, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 700, + 541, + 723 + ], + "spans": [ + { + "bbox": [ + 96, + 700, + 541, + 723 + ], + "type": "text", + "content": "- DocVQA [51]: This benchmark probes capabilities on document analysis and recognition, including Optical Character Recognition (OCR). The 5,349 questions contain images from a diverse set of documents, ranging" + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "spans": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "text", + "content": "The Amazon Nova Family of Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 741, + 307, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 307, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 307, + 750 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 70, + 69, + 541, + 294 + ], + "blocks": [ + { + "bbox": [ + 70, + 69, + 541, + 294 + ], + "lines": [ + { + "bbox": [ + 70, + 69, + 541, + 294 + ], + "spans": [ + { + "bbox": [ + 70, + 69, + 541, + 294 + ], + "type": "table", + "html": "
 | | MMMU (CoT) | ChartQA^C | DocVQA | TextVQA | VATEX | EgoSchema
 | tok/sec | val | test | test | val | test | test
 | | accuracy | relaxed accuracy | ANLS | weighted accuracy | CIDEr | accuracy
Amazon Nova Pro | 100 | 61.7 ±3.2 | 89.2 ±1.2 | 93.5 | 81.5 | 77.8 | 72.1 ±5.4
Amazon Nova Lite | 157 | 56.2 ±3.2 | 86.8 ±1.3 | 92.4 | 80.2 | 77.8 | 71.4 ±5.4
Claude 3.5 Sonnet (Oct) | 57 | 70.4 ±3.0 | 90.8 ±1.1 | 94.2 | 61.7M | - | -
Claude 3 Haiku | 64 | 50.2 ±3.3 | 82.0 ±1.5 | 88.8 | - | - | -
Gemini 1.5 Pro (001) | 58 | 65.9 ±3.1E | 87.2 ±1.3 | 93.1B | 78.7 | 64.6A | 72.2 ±5.4
Gemini 1.5 Flash (001) | 190 | 62.3 ±3.2E | 85.4 ±1.4 | 89.9B | 78.7 | 57.1 | 65.7 ±5.7
Gemini 1.5 Flash 8B (001) | 283 | 53.7 ±3.3F | 78.2 ±1.6G | 73.6 | 66.7 | 53.2A | -
GPT-4o (May) | - | 69.1 ±3.0 | 85.7 ±1.4 | 92.8 | 77.2DM | - | 72.2 ±5.4
GPT-4o Mini (Jul) | 113 | 59.4 ±3.2 | 79.2 ±1.6M | - | 70.3M | - | -
Llama 3.2 90B | 40 | 60.3 ±3.2 | 85.5 ±1.4 | 90.1 | 80.7M | - | -
Llama 3.2 11B | 124 | 50.7 ±3.3 | 83.4 ±1.5 | 88.4 | 71.3M | - | -
", + "image_path": "499a56f552f88cdf2d3fa5cc1b35e5ff796ec6798d845b6bc78b4780986c4b50.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 67, + 297, + 541, + 396 + ], + "lines": [ + { + "bbox": [ + 67, + 297, + 541, + 396 + ], + "spans": [ + { + "bbox": [ + 67, + 297, + 541, + 396 + ], + "type": "text", + "content": "Table 3: Quantitative results on four image understanding benchmarks (MMMU [85], ChartQA [50], DocVQA [51], TextVQA [70]) and 2 video understanding benchmarks (VATEX [78] and EgoSchema [49]). Higher numbers are better for all benchmarks " + }, + { + "bbox": [ + 67, + 297, + 541, + 396 + ], + "type": "inline_equation", + "content": "(\\uparrow)" + }, + { + "bbox": [ + 67, + 297, + 541, + 396 + ], + "type": "text", + "content": ". Unless otherwise noted, all evaluations are 0-shot and reference numbers are taken from the original technical reports and websites for Claude models [11, 12], GPT4 models [56, 55], Llama models [45, 53] and Gemini models [32, 33]. Remarks: (A) 4-shot evaluation; (B) External Optical Character Recognition (OCR) was used; (C) All models except Amazon Nova use CoT; (D) GPT-4o (Nov); (E) Gemini 1.5 Flash/Pro (002) models; (F) Reported in [33]; (G) Reported in [4]; (M) Claude 3.5 Sonnet and Llama 3.2 results for TextVQA as well as GPT4o and GPT4o mini results on ChartQA, TextVQA and VATEX were measured by us. Token generation speed in tokens per second (tok/sec), the inverse of per-token generation latency, is reproduced from Section 2.5." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 426, + 541, + 448 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 426, + 541, + 448 + ], + "spans": [ + { + "bbox": [ + 104, + 426, + 541, + 448 + ], + "type": "text", + "content": "from 1940 to 2020 and covering multiple industries. We report Average Normalized Levenshtein Similarity (ANLS)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 96, + 453, + 539, + 562 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 96, + 453, + 539, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 453, + 539, + 475 + ], + "spans": [ + { + "bbox": [ + 96, + 453, + 539, + 475 + ], + "type": "text", + "content": "- TextVQA [70]: The 5,000 samples of this dataset focus specifically on text-reading capabilities (OCR) in natural images. We report weighted accuracy on the validation set." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 96, + 480, + 539, + 512 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 480, + 539, + 512 + ], + "spans": [ + { + "bbox": [ + 96, + 480, + 539, + 512 + ], + "type": "text", + "content": "- VATEX [78]: This video captioning benchmark covers a diverse set of human activities. We evaluate on the public test set containing videos with a length of around 10 seconds. The CIDEr [75] score is used for evaluation." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 96, + 517, + 539, + 562 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 517, + 539, + 562 + ], + "spans": [ + { + "bbox": [ + 96, + 517, + 539, + 562 + ], + "type": "text", + "content": "- EgoSchema [49]: The unique characteristic of this long-form video question answering benchmark is its high \"certificate length\" [15], which is, loosely speaking, the time it takes a human to verify the video description. 
The videos cover a broad range of natural human activities and come with human-curated multiple-choice question-answer pairs." + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 67, + 571, + 541, + 617 + ], + "lines": [ + { + "bbox": [ + 67, + 571, + 541, + 617 + ], + "spans": [ + { + "bbox": [ + 67, + 571, + 541, + 617 + ], + "type": "text", + "content": "Table 3 summarizes our quantitative results on multiple image and video understanding benchmarks. Amazon Nova Pro and Lite achieve high scores across all benchmarks. Chart understanding on ChartQA and video understanding on VATEX stand out, where Nova models rank either first or second. We provide the prompt templates for all benchmarks in Appendix B.2, as well as qualitative examples in Appendix C." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 69, + 629, + 173, + 641 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 629, + 173, + 641 + ], + "spans": [ + { + "bbox": [ + 69, + 629, + 173, + 641 + ], + "type": "text", + "content": "2.2 Agentic workflows" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 651, + 541, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 651, + 541, + 685 + ], + "spans": [ + { + "bbox": [ + 67, + 651, + 541, + 685 + ], + "type": "text", + "content": "Amazon Nova Pro, Lite, and Micro models can be used as agents. An agent considers a suite of tools and APIs, reasons about the user's request and past conversational history, chooses if a tool should be used and, if so, decides which tool to use, invokes the tool, assesses the outcome from the tool, and then communicates back with the user [83, 67, 46, 60]." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 67, + 689, + 541, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 689, + 541, + 723 + ], + "spans": [ + { + "bbox": [ + 67, + 689, + 541, + 723 + ], + "type": "text", + "content": "To this end, we evaluated our Nova models on agentic workflows that require textual understanding and visual reasoning. For textual understanding (Section 2.2.1), we used the Berkeley Function Calling Leaderboard benchmark to test our models' capabilities in function calling and orchestrating real-world applications. For visual reasoning (Section 2.2.2)," + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "spans": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "text", + "content": "The Amazon Nova Family of Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 741, + 307, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 307, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 307, + 750 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 72, + 541, + 97 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 541, + 97 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 541, + 97 + ], + "type": "text", + "content": "we evaluate on three benchmarks that require image understanding capabilities for correct function calling. We highlight that both Amazon Nova Pro and Lite models set a new state of the art on these challenging benchmarks." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 108, + 258, + 119 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 108, + 258, + 119 + ], + "spans": [ + { + "bbox": [ + 69, + 108, + 258, + 119 + ], + "type": "text", + "content": "2.2.1 Agentic text benchmarks and results" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 126, + 541, + 183 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 126, + 541, + 183 + ], + "spans": [ + { + "bbox": [ + 67, + 126, + 541, + 183 + ], + "type": "text", + "content": "Table 4 presents quantitative results on the Berkeley Function Calling Leaderboard v3 (BFCL).3 Stemming from the Gorilla project [60], the revamped BFCL [81] benchmark evaluates a model's ability to accurately call and utilize real-world functions, or tools, based on a user's natural language request. Amazon Nova models particularly excel in the Abstract Syntax Tree (AST), Execution, and Relevance metrics, as well as overall scores versus comparable models. Amazon Nova Lite and Micro also had the lowest latency of the selected models." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 186, + 541, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 186, + 541, + 232 + ], + "spans": [ + { + "bbox": [ + 67, + 186, + 541, + 232 + ], + "type": "text", + "content": "In Table 4, AST measures the exact match function calling performance of the model when comparing function names and argument/value signatures to a human-curated ground truth. While AST allows for some soft matching based on manually-defined, permitted argument values (e.g., different date formats), Execution measures a function call's accuracy not by the call signature itself, but by comparing the return value of the call when executed against a real API." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 236, + 541, + 281 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 236, + 541, + 281 + ], + "spans": [ + { + "bbox": [ + 67, + 236, + 541, + 281 + ], + "type": "text", + "content": "To measure the rate of hallucination, Irrelevance measures the model's ability to recognize that it does not have the appropriate functions available to help the user, and should therefore not call any. Relevance, as the opposite of irrelevance, measures the model's ability to recognize it indeed does have the functions necessary to help the user (but does not verify function signature accuracy). For both metrics, higher numbers are better." + } + ] + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 70, + 291, + 542, + 512 + ], + "blocks": [ + { + "bbox": [ + 70, + 291, + 542, + 512 + ], + "lines": [ + { + "bbox": [ + 70, + 291, + 542, + 512 + ], + "spans": [ + { + "bbox": [ + 70, + 291, + 542, + 512 + ], + "type": "table", + "html": "
 | Overall | Latency | Non-Live | Live | Multi-Turn | Hallucination
 | accuracy (↑) | seconds (↓) | AST (↑) | execution (↑) | overall (↑) | overall (↑) | relevance (↑) | irrelevance (↑)
Nova Pro | 68.4 | 1.0 | 90.1 | 89.8 | 71.5 | 45.1 | 95.1 | 65.1
Nova Lite | 66.6 | 0.6 | 87.5 | 86.4 | 66.0 | 50.3 | 97.6 | 49.1
Nova Micro | 56.2 | 0.5 | 87.2 | 89.7 | 67.4 | 15.5 | 87.8 | 57.6
Claude Sonnet 3.5 (Jun) | 61.3 | 3.9 | 70.0 | 66.3 | 74.7 | 40.0 | 68.3 | 74.6
Claude Haiku 3 | 40.4 | 1.5 | 41.7 | 47.5 | 57.7 | 20.6 | 97.6 | 29.4
Gemini 1.5 Pro (002) | 59.8 | 3.0 | 88.0 | 91.4 | 74.3 | 16.3 | 75.6 | 75.1
Gemini 1.5 Flash (002) | 55.3 | 1.1 | 79.7 | 80.6 | 73.2 | 12.5 | 78.1 | 75.7
Llama 3.2 90B^A | 54.3 | N/A | 88.9 | 89.3 | 61.1 | 14.3 | 92.7 | 58.4
Llama 3.2 11B^A | 49.9 | N/A | 83.6 | 87.3 | 57.9 | 10.5 | 78.1 | 41.6
GPT-4o (Aug) | 68.9 | 1.5 | 85.9 | 85.6 | 75.4 | 45.3 | 63.4 | 82.9
GPT-4o-mini (Jul) | 60.7 | 1.6 | 84.3 | 84.1 | 70.2 | 28.3 | 80.5 | 71.8
", + "image_path": "4c9b6e696a10b010149f3053995de7bcfebf5ebbcc7d50857945ce6d01b19f02.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 514, + 541, + 548 + ], + "lines": [ + { + "bbox": [ + 67, + 514, + 541, + 548 + ], + "spans": [ + { + "bbox": [ + 67, + 514, + 541, + 548 + ], + "type": "text", + "content": "Table 4: Results on the Berkeley Function Calling Leaderboard (BFCL) v3 as of the Nov 17th, 2024 update. We include the latest versions of the models available on the leaderboard at that time. (A) We use leaderboard results for Llama 3.1 8B and 70B for Llama 3.2 11B and 90B, respectively, given the shared text LLM." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 69, + 575, + 292, + 588 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 575, + 292, + 588 + ], + "spans": [ + { + "bbox": [ + 69, + 575, + 292, + 588 + ], + "type": "text", + "content": "2.2.2 Agentic multimodal benchmarks and results" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 594, + 541, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 594, + 541, + 640 + ], + "spans": [ + { + "bbox": [ + 67, + 594, + 541, + 640 + ], + "type": "text", + "content": "The Amazon Nova Pro and Lite models provide native support for multimodal inputs, including agentic workflows. In this section, we present results from our models on three different benchmarks that require agents to navigate websites to solve real-world tasks. Websites are typically represented as screenshots in these datasets to correctly convey all style elements and visual data as rendered in a standard web browser." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 96, + 648, + 541, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 648, + 541, + 693 + ], + "spans": [ + { + "bbox": [ + 96, + 648, + 541, + 693 + ], + "type": "text", + "content": "- VisualWebBench [43]: This benchmark includes seven core tasks related to web browsing, including captioning, question answering, OCR, action prediction, and grounding. All models are evaluated on 1,536 samples that span more than 100 websites from 12 domains. The final metric is the average over different metrics for the individual core tasks." + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "spans": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "text", + "content": "The Amazon Nova Family of Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 700, + 541, + 723 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 700, + 541, + 723 + ], + "spans": [ + { + "bbox": [ + 69, + 700, + 541, + 723 + ], + "type": "inline_equation", + "content": "{}^{3}" + }, + { + "bbox": [ + 69, + 700, + 541, + 723 + ], + "type": "text", + "content": " BFCL is a fast-moving, live benchmark. We report results using the state of the repository and website leaderboard as of Nov 17th, 2024 (commit 8226d)." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 300, + 741, + 307, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 307, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 307, + 750 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 96, + 72, + 538, + 183 + ], + "type": "list", + "angle": 0, + "index": 3, + "blocks": [ + { + "bbox": [ + 96, + 72, + 538, + 126 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 72, + 538, + 126 + ], + "spans": [ + { + "bbox": [ + 96, + 72, + 538, + 126 + ], + "type": "text", + "content": "- MM-Mind2Web [86]: This extension of the original Mind2Web [24] benchmark links samples with the original website screenshots, making it multimodal. An agent needs to select an element and pick one of three elementary actions (click, type, or select) alongside a value for some actions. We report micro average over the per-sample step accuracy, where an agent is successful only if element and action selection, as well as the predicted value, are correct." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 96, + 130, + 538, + 183 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 130, + 538, + 183 + ], + "spans": [ + { + "bbox": [ + 96, + 130, + 538, + 183 + ], + "type": "text", + "content": "- GroundUI-1K [87]: This benchmark is composed of multiple existing datasets, including Mind2Web [24] and repurposes them as a grounding task. On 1,000 samples for evaluation, a multimodal agent is given an instruction and a screenshot of a website from a wide variety of domains and asked to predict the 2D location of the desired UI element. The agent is correct if its predicted 2D location is within the ground truth bounding box." + } + ] + } + ], + "index": 2 + } + ], + "sub_type": "text" + }, + { + "type": "table", + "bbox": [ + 116, + 233, + 493, + 434 + ], + "blocks": [ + { + "bbox": [ + 67, + 192, + 541, + 226 + ], + "lines": [ + { + "bbox": [ + 67, + 192, + 541, + 226 + ], + "spans": [ + { + "bbox": [ + 67, + 192, + 541, + 226 + ], + "type": "text", + "content": "Table 5 shows the results of our models on multimodal agent workflows along with other publicly-reported results. Both Amazon Nova models, Lite and Pro, demonstrate strong visual reasoning and agentic capabilities and achieve high scores on all three benchmarks." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 116, + 233, + 493, + 434 + ], + "lines": [ + { + "bbox": [ + 116, + 233, + 493, + 434 + ], + "spans": [ + { + "bbox": [ + 116, + 233, + 493, + 434 + ], + "type": "table", + "html": "
 | VisualWebBench composite^D | MM-Mind2Web step accuracy | GroundUI-1K accuracy
Nova Pro | 79.7 | 63.7 | 81.4
Nova Lite | 77.7 | 60.7 | 80.2
Claude 3.5 Sonnet (Oct) | 76.7M | 61.6M | 16.3
GPT-4o (Nov) | 77.5M | 55.0M | 13.4C
GPT-4o Mini (Jul) | 71.3M | 58.6M | 7.2M
GPT-4 (Apr) | 64.6 | 36.8A | -
Gemini 1.5 Pro (002) | 76.4M | 58.4M | 35.2B
Gemini 1.5 Flash (002) | 76.1M | 46.2M | 59.9M
Gemini 1.0 Pro (001) | 48.0 | 17.9A | -
Llama 3.2 90B | 73.2M | 21.6M | 8.3M
Llama 3.2 11B | 65.1M | 22.1M | 3.7M
", + "image_path": "7dad23d08aedb09972c224decfe86591bd5b6b4bfd30999e7dea4dd1df54b9ab.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 437, + 541, + 491 + ], + "lines": [ + { + "bbox": [ + 67, + 437, + 541, + 491 + ], + "spans": [ + { + "bbox": [ + 67, + 437, + 541, + 491 + ], + "type": "text", + "content": "Table 5: Quantitative results on three multi-modal agentic benchmarks: VisualWebBench [43], MM-Mind2Web [86] and GroundUI-1K [87]. Reference numbers are taken from the corresponding benchmark papers [43, 86, 87] and leaderboard [3]. Remarks: (A) uses in-context learning (ICL) (please note that Amazon Nova models do not need to rely on in-context examples); (B) Gemini 1.5 Pro (001); (C) GPT-4o (May); (D) Macro average over individual metrics; (M) Measured by us." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 69, + 517, + 151, + 529 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 517, + 151, + 529 + ], + "spans": [ + { + "bbox": [ + 69, + 517, + 151, + 529 + ], + "type": "text", + "content": "2.3 Long context" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 536, + 541, + 593 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 536, + 541, + 593 + ], + "spans": [ + { + "bbox": [ + 67, + 536, + 541, + 593 + ], + "type": "text", + "content": "We evaluate Amazon Nova Pro, Lite, and Micro on tasks that require the models to understand and reason over long context. These skills are crucial for tasks such as long multi-turn conversations, reasoning over long lists of retrieved documents, or understanding long videos. Amazon Nova Micro, Lite, and Pro models support context lengths of 128k, 300k, and 300k tokens, respectively. We used the following benchmarks to evaluate our models' long context performance:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 96, + 599, + 539, + 703 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 96, + 599, + 538, + 643 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 599, + 538, + 643 + ], + "spans": [ + { + "bbox": [ + 96, + 599, + 538, + 643 + ], + "type": "text", + "content": "- Text Needle-in-a-Haystack (NIAH): Following [40], we assessed each model's ability to locate specific information (the \"needle\") within extensive contexts (the \"haystack\"). This \"needle-in-a-haystack\" test evaluates the model's performance on context lengths starting at " + }, + { + "bbox": [ + 96, + 599, + 538, + 643 + ], + "type": "inline_equation", + "content": "32\\mathrm{k}" + }, + { + "bbox": [ + 96, + 599, + 538, + 643 + ], + "type": "text", + "content": ", allowing us to measure its ability to accurately retrieve information across varying lengths of input context." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 96, + 646, + 539, + 667 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 646, + 539, + 667 + ], + "spans": [ + { + "bbox": [ + 96, + 646, + 539, + 667 + ], + "type": "text", + "content": "- SQuALITY [76] (ZeroScrolls Benchmark [69]): Focused on query-based summarization of literary stories, this task evaluates the model's capacity to generate relevant summaries from large contexts." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 96, + 671, + 539, + 703 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 671, + 539, + 703 + ], + "spans": [ + { + "bbox": [ + 96, + 671, + 539, + 703 + ], + "type": "text", + "content": "- LVBench [77]: This multimodal benchmark includes questions about YouTube videos from various domains such as TV series, sports, broadcasts, and surveillance footage. The LVBench dataset consists of 99 videos and 1,549 questions, covering six different types of tasks such as reasoning, event understanding and summarization." + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "spans": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "text", + "content": "The Amazon Nova Family of Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 82, + 710, + 315, + 722 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 710, + 315, + 722 + ], + "spans": [ + { + "bbox": [ + 82, + 710, + 315, + 722 + ], + "type": "text", + "content": "4https://huggingface.co/datasets/AIWinter/LVBench" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 298, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 298, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 298, + 741, + 309, + 750 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 132, + 76, + 208, + 257 + ], + "blocks": [ + { + "bbox": [ + 132, + 76, + 208, + 257 + ], + "lines": [ + { + "bbox": [ + 132, + 76, + 208, + 257 + ], + "spans": [ + { + "bbox": [ + 132, + 76, + 208, + 257 + ], + "type": "image", + "image_path": "a575838c54ef1139078534f10109c9a2c9a9db02f73565857edd5970bcc3d3d6.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 68, + 266, + 541, + 291 + ], + "lines": [ + { + "bbox": [ + 68, + 266, + 541, + 291 + ], + "spans": [ + { + "bbox": [ + 68, + 266, + 541, + 291 + ], + "type": "text", + "content": "Figure 2: Text Needle-in-a-Haystack recall performance for Nova Micro (up-to 128k), Nova Lite (up-to 300k) and Nova Pro (up-to 300k) models." 
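A minimal harness for the needle-in-a-haystack test described in the list above might look like the following. Here `call_model` is a hypothetical model-API callable, the needle and filler text are invented, and words stand in for tokens; the exact setup of [40] differs in these details.

```python
def needle_in_haystack(call_model, context_words: int, depth: float) -> bool:
    """Insert a known fact at a relative depth and check the model recalls it."""
    needle = "The secret passphrase is 'ROSEBUD-42'."
    unit = "The sky was clear and the market was quiet that day.".split()
    words = (unit * (context_words // len(unit) + 1))[:context_words]
    pos = int(depth * len(words))          # 0.0 = start, 1.0 = end of context
    haystack = " ".join(words[:pos] + [needle] + words[pos:])
    prompt = (haystack +
              "\n\nWhat is the secret passphrase? Answer with the passphrase only.")
    return "ROSEBUD-42" in call_model(prompt)

# Sweep context lengths and insertion depths; each grid cell is a hit or miss,
# which is what a recall heatmap like Figure 2 aggregates.
def recall_grid(call_model, lengths=(32_000, 64_000, 128_000),
                depths=(0.0, 0.25, 0.5, 0.75, 1.0)):
    return {(n, d): needle_in_haystack(call_model, n, d)
            for n in lengths for d in depths}
```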
+ } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 219, + 76, + 321, + 257 + ], + "blocks": [ + { + "bbox": [ + 219, + 76, + 321, + 257 + ], + "lines": [ + { + "bbox": [ + 219, + 76, + 321, + 257 + ], + "spans": [ + { + "bbox": [ + 219, + 76, + 321, + 257 + ], + "type": "image", + "image_path": "62810bb4a4f0c1a6ec5b253cc0bdfe5416772d4bd04d5e56463fb15a82e82c78.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 342, + 76, + 471, + 257 + ], + "blocks": [ + { + "bbox": [ + 342, + 76, + 471, + 257 + ], + "lines": [ + { + "bbox": [ + 342, + 76, + 471, + 257 + ], + "spans": [ + { + "bbox": [ + 342, + 76, + 471, + 257 + ], + "type": "image", + "image_path": "0cbd85126bd687b64f349529061c5f9e6d085266463731bb8ec5d4319a9c86c7.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 164, + 325, + 449, + 511 + ], + "blocks": [ + { + "bbox": [ + 164, + 325, + 449, + 511 + ], + "lines": [ + { + "bbox": [ + 164, + 325, + 449, + 511 + ], + "spans": [ + { + "bbox": [ + 164, + 325, + 449, + 511 + ], + "type": "table", + "html": "
 | SQuALITY ROUGE-L | LVBench accuracy
Nova Pro | 19.8 ±8.7 | 41.6 ±2.5
Nova Lite | 19.2 ±8.6 | 40.4 ±2.4
Nova Micro | 18.8 ±8.6 | -
Claude 3.5 Sonnet (Jun) | 13.4 ±7.5 | -
Gemini 1.5 Pro (001) | - | 33.1 ±2.3
Gemini 1.5 Pro (002) | 19.1 ±8.6M | -
Gemini 1.5 Flash (002) | 18.1 ±8.4M | -
GPT-4o | 18.8 ±8.6 | 30.8 ±2.3
Llama 3 70B | 16.4 ±8.1 | -
Llama 3 8B | 15.3 ±7.9 | -
", + "image_path": "3812b6443d8505c899375917bc3608643f3f56d03d8761c7851c7a38dc3c85ce.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 68, + 513, + 541, + 559 + ], + "lines": [ + { + "bbox": [ + 68, + 513, + 541, + 559 + ], + "spans": [ + { + "bbox": [ + 68, + 513, + 541, + 559 + ], + "type": "text", + "content": "Table 6: Text and Multimodal long context performance on SQuALITY (ROUGE-L) and LVBench (Accuracy). For SQuALITY, measurements for Claude 3.5 Sonnet, GPT-4o, Llama 3 70B and Llama 3 8B are taken from the Llama 3 report [45]. Gemini results were measured by " + }, + { + "bbox": [ + 68, + 513, + 541, + 559 + ], + "type": "inline_equation", + "content": "\\mathrm{us}^2" + }, + { + "bbox": [ + 68, + 513, + 541, + 559 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 68, + 513, + 541, + 559 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 68, + 513, + 541, + 559 + ], + "type": "text", + "content": "). For LVBench, Gemini and GPT-4o numbers were taken from the corresponding benchmark leaderboard [77]." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 68, + 586, + 541, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 586, + 541, + 632 + ], + "spans": [ + { + "bbox": [ + 68, + 586, + 541, + 632 + ], + "type": "text", + "content": "Results for text and multimodal long context benchmarks are presented in Table 6. In the long video question answering task, both Amazon Nova Pro and Lite demonstrate robust performance on the LVBench dataset, surpassing other models. Amazon Nova models consistently demonstrate exceptional performance in retrieving information from any depth across both text and multimodal understanding use cases, delivering high accuracy and reliability." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 654, + 181, + 666 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 654, + 181, + 666 + ], + "spans": [ + { + "bbox": [ + 69, + 654, + 181, + 666 + ], + "type": "text", + "content": "2.4 Functional expertise" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 68, + 677, + 541, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 677, + 541, + 723 + ], + "spans": [ + { + "bbox": [ + 68, + 677, + 541, + 723 + ], + "type": "text", + "content": "In addition to core capabilities, foundation models must perform well in particular specialties and domains. Across our many areas of performance analyses, we have selected four domains for which to present benchmarking results: Software engineering, financial analysis, and retrieval-augmented generation. Prompt templates for all benchmarks can be found in Appendix B.3." 
+ } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "spans": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "text", + "content": "The Amazon Nova Family of Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 297, + 741, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 741, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 297, + 741, + 308, + 750 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 116, + 70, + 495, + 337 + ], + "blocks": [ + { + "bbox": [ + 116, + 70, + 495, + 337 + ], + "lines": [ + { + "bbox": [ + 116, + 70, + 495, + 337 + ], + "spans": [ + { + "bbox": [ + 116, + 70, + 495, + 337 + ], + "type": "table", + "html": "
 | | Software | Finance | RAG
 | | HumanEval Python | FinQA | CRAG
 | tok/sec | 0-shot pass@1 | 0-shot accuracy | accuracy
Nova Pro | 100 | 89.0 ±4.8 | 77.2 ±0.9 | 50.3 ±1.9
Nova Lite | 157 | 85.4 ±5.4 | 73.6 ±0.9 | 43.8 ±1.9
Nova Micro | 210 | 81.1 ±6.0 | 65.2 ±1.0 | 43.1 ±1.9
Claude 3.5 Sonnet (Oct) | 57 | 93.7 ±3.7 | 77.3 ±0.9M | 52.6 ±1.8M
Claude 3.5 Haiku | 64 | 88.1 ±5.0 | 73.9 ±0.9M | 31.9 ±1.8M
Gemini 1.5 Pro (002) | 58 | 87.8 ±5.0M | 74.4 ±0.9M | 48.9 ±1.9M
Gemini 1.5 Flash (002) | 190 | 81.1 ±6.0M | 73.5 ±1.0M | 42.4 ±1.9M
Gemini 1.5 Flash 8B (001) | 283 | 81.1 ±6.0M | 63.7 ±1.0M | 37.7 ±1.8M
GPT-4o | 163 | 90.2 ±4.6 | 71.1 ±1.0M | 52.0 ±1.9M
GPT-4o Mini | 113 | 87.2 ±5.1 | 70.6 ±1.0M | 49.9 ±1.9M
Llama 3.2 90B | 40 | 80.5 ±6.1 | 72.8 ±1.0M | 45.2 ±1.9M
Llama 3.2 11B | 124 | 72.6 ±6.8 | 60.8 ±1.1M | 42.2 ±1.9M
Llama 3.1 8B | 157 | 72.6 ±6.8 | 61.2 ±1.0M | 42.2 ±1.8M
", + "image_path": "4925649280fa45f14562e7b6baa58f4947d95e95c004bfe6d7380dc0f925a1f9.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 339, + 541, + 396 + ], + "lines": [ + { + "bbox": [ + 67, + 339, + 541, + 396 + ], + "spans": [ + { + "bbox": [ + 67, + 339, + 541, + 396 + ], + "type": "text", + "content": "Table 7: Performance on select functional benchmarks, including software engineering benchmarks in Python with HumanEval [19], financial reasoning with FinQA [20], and retrieval augmented generation with CRAG [82]. CRAG uses our scoring method described in Section 2.4.3. Where available, reference numbers are taken from the corresponding benchmark papers and technical reports [13, 11, 32, 39, 45, 58]. Additional results were measured " + }, + { + "bbox": [ + 67, + 339, + 541, + 396 + ], + "type": "inline_equation", + "content": "(M)" + }, + { + "bbox": [ + 67, + 339, + 541, + 396 + ], + "type": "text", + "content": " by " + }, + { + "bbox": [ + 67, + 339, + 541, + 396 + ], + "type": "inline_equation", + "content": "\\mathrm{us}^2" + }, + { + "bbox": [ + 67, + 339, + 541, + 396 + ], + "type": "text", + "content": ". Model speed in tokens per second (Tok/Sec) is reproduced from section 2.5." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 69, + 428, + 194, + 440 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 428, + 194, + 440 + ], + "spans": [ + { + "bbox": [ + 69, + 428, + 194, + 440 + ], + "type": "text", + "content": "2.4.1 Software engineering" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 448, + 541, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 448, + 541, + 495 + ], + "spans": [ + { + "bbox": [ + 67, + 448, + 541, + 495 + ], + "type": "text", + "content": "We assessed Amazon Nova's code generation capabilities on the Python coding task HumanEval [19]. The benchmark contains 164 original programming problems with unit tests. These problems assess language comprehension, algorithms, and simple mathematics. Some problems are comparable to simple software interview questions. Table 7 provides the performance of our Nova models and select public models." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 510, + 179, + 521 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 510, + 179, + 521 + ], + "spans": [ + { + "bbox": [ + 69, + 510, + 179, + 521 + ], + "type": "text", + "content": "2.4.2 Financial analysis" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 530, + 541, + 587 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 530, + 541, + 587 + ], + "spans": [ + { + "bbox": [ + 67, + 530, + 541, + 587 + ], + "type": "text", + "content": "We use FinQA [20] to evaluate Amazon Nova's ability to understand financial data. FinQA is an expert-annotated dataset comprising 8,281 financial question-answer pairs derived from the earnings reports of S&P 500 companies. It evaluates a model's ability to extract information from both tables and unstructured text, while accurately performing calculations using relevant financial knowledge. We report the average post-rounding accuracy under the 0-shot CoT setting. Table 7 provides the performance of Amazon Nova models and select public models on FinQA." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 602, + 239, + 615 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 602, + 239, + 615 + ], + "spans": [ + { + "bbox": [ + 69, + 602, + 239, + 615 + ], + "type": "text", + "content": "2.4.3 Retrieval augmented generation" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 623, + 541, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 623, + 541, + 723 + ], + "spans": [ + { + "bbox": [ + 67, + 623, + 541, + 723 + ], + "type": "text", + "content": "We evaluate RAG capabilities on the CRAG [82] benchmark using the Task 1 setup, which considers five pre-selected HTML pages as external knowledge to each input question. We extract top-20 text snippets from these pages following the standard retrieval approach used in CRAG's official repository, whereby pages are first cleaned using BeautifulSoup to remove HTML tags, after which the text is split into sentences or chunks no longer than 1000 characters. These are then encoded using the sentence-transformers/all-MiniLM-L6-v2 model, which is also used to encode the question. The top-20 chunks with the highest similarity are passed as context in the input for model inference. We report the percentage of correct responses as judged by an LLM (gpt-4-turbo-2024-04-09), which compares each model's answer with the expected answer using the prompt shown in Appendix B.3.2. Table 7 provides the performance of Amazon Nova models and selected public models on a combined validation and test set of 2,706 examples." + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "spans": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "text", + "content": "The Amazon Nova Family of Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 298, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 298, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 298, + 741, + 309, + 750 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 72, + 189, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 72, + 189, + 84 + ], + "spans": [ + { + "bbox": [ + 69, + 72, + 189, + 84 + ], + "type": "text", + "content": "2.5 Runtime performance" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 92, + 541, + 170 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 92, + 541, + 170 + ], + "spans": [ + { + "bbox": [ + 67, + 92, + 541, + 170 + ], + "type": "text", + "content": "We evaluate the runtime performance of Amazon Nova models using three metrics: Time to First Token (TTFT), Output Tokens per Second (OTPS) and Total Response Time. TTFT is measured as the time, in seconds, it takes to receive the first token from the model after an API request is sent. OTPS is measured as the number of tokens generated per second (tok/sec). It is the rate at which a model produces subsequent output tokens after the first token, reflecting overall throughput and efficiency during inference. Total Response Time measures the total duration in seconds from the submission of the input prompt to the end of the generation sequence for a given input-output prompt length. 
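The retrieval step described in Section 2.4.3 can be approximated as below, assuming the beautifulsoup4 and sentence-transformers packages. Chunk boundaries and other details of CRAG's official repository may differ, so treat this as a sketch rather than the exact pipeline.

```python
from bs4 import BeautifulSoup
from sentence_transformers import SentenceTransformer, util

encoder = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")

def top_k_chunks(question: str, html_pages: list[str], k: int = 20) -> list[str]:
    # Strip HTML tags, then split the page text into chunks of at most
    # 1000 characters each.
    chunks = []
    for page in html_pages:
        text = BeautifulSoup(page, "html.parser").get_text(" ", strip=True)
        chunks.extend(text[i:i + 1000] for i in range(0, len(text), 1000))
    # Embed the question and all chunks with the same encoder, then keep the
    # k chunks with the highest cosine similarity as retrieval context.
    q_emb = encoder.encode(question, convert_to_tensor=True)
    c_emb = encoder.encode(chunks, convert_to_tensor=True)
    scores = util.cos_sim(q_emb, c_emb)[0]
    best = scores.argsort(descending=True)[:k]
    return [chunks[int(i)] for i in best]
```

The returned chunks would then be concatenated into the model prompt, with an LLM judge scoring the final answer against the expected one.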
It represents the overall user experience for a model." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 175, + 541, + 231 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 175, + 541, + 231 + ], + "spans": [ + { + "bbox": [ + 67, + 175, + 541, + 231 + ], + "type": "text", + "content": "In Figure 3, we show TTFT, OTPS, and Total Response Time using 1000 tokens of input and 100 tokens of output for Amazon Nova models and select public models as reported by Artificial Analysis5, an independent entity that benchmarks AI models and hosting providers. Amazon Nova Micro, Lite and Pro models are among the fastest models in their respective intelligence tiers. Together, all three Amazon Nova models demonstrate state-of-the-art runtime performance, ensuring a smooth and responsive user experience in many real world use cases." + } + ] + } + ], + "index": 3 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 227, + 34, + 379, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 227, + 34, + 379, + 45 + ], + "spans": [ + { + "bbox": [ + 227, + 34, + 379, + 45 + ], + "type": "text", + "content": "The Amazon Nova Family of Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 82, + 710, + 282, + 722 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 710, + 282, + 722 + ], + "spans": [ + { + "bbox": [ + 82, + 710, + 282, + 722 + ], + "type": "text", + "content": "5https://artificialanalysis.ai/methodology" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 298, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 298, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 298, + 741, + 309, + 750 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 93, + 115, + 517, + 300 + ], + "blocks": [ + { + "bbox": [ + 93, + 115, + 517, + 300 + ], + "lines": [ + { + "bbox": [ + 93, + 115, + 517, + 300 + ], + "spans": [ + { + "bbox": [ + 93, + 115, + 517, + 300 + ], + "type": "image", + "image_path": "e774b73fa1e735c2e5327408e7138bc5a659ab582e779f6af3b14f94864a0daa.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 92, + 299, + 515, + 466 + ], + "blocks": [ + { + "bbox": [ + 92, + 299, + 515, + 466 + ], + "lines": [ + { + "bbox": [ + 92, + 299, + 515, + 466 + ], + "spans": [ + { + "bbox": [ + 92, + 299, + 515, + 466 + ], + "type": "image", + "image_path": "f66f29a61daabbd25d85f8a0f81690ced064d2fb9861e120cc24a4a32227f13c.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 94, + 468, + 515, + 635 + ], + "blocks": [ + { + "bbox": [ + 94, + 468, + 515, + 635 + ], + "lines": [ + { + "bbox": [ + 94, + 468, + 515, + 635 + ], + "spans": [ + { + "bbox": [ + 94, + 468, + 515, + 635 + ], + "type": "image", + "image_path": "4754f9755877defa3dbe1fd9cd96eb21940d26e1151935c60631550c46e02dc5.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 641, + 541, + 675 + ], + "lines": [ + { + "bbox": [ + 67, + 641, + 541, + 675 + ], + "spans": [ + { + "bbox": [ + 67, + 641, + 541, + 675 + ], + "type": "text", + "content": "Figure 3: Time to First Token " + }, + { + "bbox": [ + 67, + 641, + 541, + 675 + ], + "type": "inline_equation", + 
"content": "(\\downarrow)" + }, + { + "bbox": [ + 67, + 641, + 541, + 675 + ], + "type": "text", + "content": ", Output Tokens per Second " + }, + { + "bbox": [ + 67, + 641, + 541, + 675 + ], + "type": "inline_equation", + "content": "(\\uparrow)" + }, + { + "bbox": [ + 67, + 641, + 541, + 675 + ], + "type": "text", + "content": ", and Total Response Time " + }, + { + "bbox": [ + 67, + 641, + 541, + 675 + ], + "type": "inline_equation", + "content": "(\\downarrow)" + }, + { + "bbox": [ + 67, + 641, + 541, + 675 + ], + "type": "text", + "content": " using 1,000 tokens of input and 100 tokens of output for Amazon Nova models and select publicly-available models (Artificial Analysis, Nov 29th, 2024)." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "spans": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "text", + "content": "The Amazon Nova Family of Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 298, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 298, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 298, + 741, + 309, + 750 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 71, + 261, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 71, + 261, + 83 + ], + "spans": [ + { + "bbox": [ + 69, + 71, + 261, + 83 + ], + "type": "text", + "content": "3 Amazon Nova Canvas Evaluation" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 95, + 541, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 95, + 541, + 140 + ], + "spans": [ + { + "bbox": [ + 67, + 95, + 541, + 140 + ], + "type": "text", + "content": "Amazon Nova Canvas is a diffusion model that takes a text prompt and an optional RGB image as input and generates an image as an output conditioned on the input text and optional image. Illustrative examples of the images generated by Amazon Nova Canvas can be found in our Amazon Science blog post " + }, + { + "bbox": [ + 67, + 95, + 541, + 140 + ], + "type": "inline_equation", + "content": "^{6}" + }, + { + "bbox": [ + 67, + 95, + 541, + 140 + ], + "type": "text", + "content": ". In this section, we provide details on the evaluation strategy and performance of the model both in terms of automated metrics and human evaluation." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 152, + 176, + 163 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 152, + 176, + 163 + ], + "spans": [ + { + "bbox": [ + 69, + 152, + 176, + 163 + ], + "type": "text", + "content": "3.1 Automated metrics" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 172, + 443, + 184 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 172, + 443, + 184 + ], + "spans": [ + { + "bbox": [ + 67, + 172, + 443, + 184 + ], + "type": "text", + "content": "We use ImageReward [80] and Text-to-Image Faithfulness (TIFA) [38] as automated metrics." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 96, + 193, + 541, + 275 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 96, + 193, + 541, + 227 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 193, + 541, + 227 + ], + "spans": [ + { + "bbox": [ + 96, + 193, + 541, + 227 + ], + "type": "text", + "content": "- ImageReward score is generated from a standardized reward model that aligns human preference with the predicted score. To compute the ImageReward score, we randomly sample 10k prompts from MSCOCO2014 [42] validation set and use this set for calculating the score." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 96, + 229, + 541, + 275 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 229, + 541, + 275 + ], + "spans": [ + { + "bbox": [ + 96, + 229, + 541, + 275 + ], + "type": "text", + "content": "- Text-to-Image Faithfulness (TIFA) score is a reference-free metric that measures the faithfulness of a generated image to the input text via visual question answering (VQA). The evaluation set for TIFA score is a pre-selected 4k prompts in the TIFA-v1.0 benchmark, sampled from MSCOCO captions [42], DrawBench [66], PartiPrompts [84], and PaintSkill [21] datasets." + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 67, + 282, + 541, + 306 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 282, + 541, + 306 + ], + "spans": [ + { + "bbox": [ + 67, + 282, + 541, + 306 + ], + "type": "text", + "content": "We compare Amazon Nova Canvas with other publicly-available models including DALL.E 3 [16], Stable Diffusion 3 Medium [27], Stable Diffusion 3.5 Large [28] and Flux (Schnell and Pro) [17]. The results are shown in Table 8." + } + ] + } + ], + "index": 8 + }, + { + "type": "table", + "bbox": [ + 189, + 315, + 421, + 423 + ], + "blocks": [ + { + "bbox": [ + 189, + 315, + 421, + 423 + ], + "lines": [ + { + "bbox": [ + 189, + 315, + 421, + 423 + ], + "spans": [ + { + "bbox": [ + 189, + 315, + 421, + 423 + ], + "type": "table", + "html": "
 | TIFA | ImageReward
Amazon Nova Canvas | 0.897 | 1.250
DALL.E 3 | 0.863 | 1.052
Stable Diffusion 3.5 Large | 0.891 | 1.082
Stable Diffusion 3 Medium | 0.881 | 0.952
Flux Pro 1.0 | 0.875 | 1.075
Flux Schnell | 0.882 | 0.999
", + "image_path": "1911bf0acd67bd15d0f2b53ab74ed3db72d3ea068b54888f5c0238b981b3c475.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 100, + 426, + 508, + 437 + ], + "lines": [ + { + "bbox": [ + 100, + 426, + 508, + 437 + ], + "spans": [ + { + "bbox": [ + 100, + 426, + 508, + 437 + ], + "type": "text", + "content": "Table 8: Comparison of TIFA and ImageReward metrics of Amazon Nova Canvas with other models." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 69, + 464, + 173, + 475 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 464, + 173, + 475 + ], + "spans": [ + { + "bbox": [ + 69, + 464, + 173, + 475 + ], + "type": "text", + "content": "3.2 Human evaluation" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 67, + 484, + 541, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 484, + 541, + 552 + ], + "spans": [ + { + "bbox": [ + 67, + 484, + 541, + 552 + ], + "type": "text", + "content": "We conduct A/B testing to compare Amazon Nova Canvas with other third-party text-to-image models. The A/B testing prompt set is composed of approximately 1,000 prompts designed to capture customer usage of text-to-image models. This set includes prompts from datasets such as MSCOCO [42], Drawbench [66], OpenParti [84], DALL.E 3 Eval [16], and DOCCI [54] and covers a broad set of categories such as humans, landscapes, natural scenarios, indoor environments, creative themes, artistic themes, and so forth. A few prompts were randomly selected and repeated in order to get additional data points on the quality of the model." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 67, + 555, + 541, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 555, + 541, + 677 + ], + "spans": [ + { + "bbox": [ + 67, + 555, + 541, + 677 + ], + "type": "text", + "content": "With each prompt we generate an image from Amazon Nova Canvas as well as each other text-to-image model. We used random seeds to generate the images from Amazon Nova Canvas and all images were generated at " + }, + { + "bbox": [ + 67, + 555, + 541, + 677 + ], + "type": "inline_equation", + "content": "1\\mathrm{k}\\times 1\\mathrm{k}" + }, + { + "bbox": [ + 67, + 555, + 541, + 677 + ], + "type": "text", + "content": " resolution. If the prompts trigger filters such that an image is not generated, for either the Amazon Nova Canvas model or the public text-to-image model, we ignore that prompt and do not show it to the human raters. All human evaluation is done in a single-blind manner where the annotator is provided two sets of images, one from Amazon Nova Canvas and the other from the third-party model. The order of the images are randomized for each prompt and annotator. In our blind testing, we ask human annotators to select images that they prefer based on (1) text-image alignment, which measures the instruction-following capability of the model, and (2) image quality, which quantifies the overall preference of the annotators. To ensure rigorous, consistent, and unbiased evaluation, we used a third-party vendor for human evaluation. We created guidelines that were used to train the annotators so that the decision-making criteria were clear to them in each dimension." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 67, + 681, + 541, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 681, + 541, + 704 + ], + "spans": [ + { + "bbox": [ + 67, + 681, + 541, + 704 + ], + "type": "text", + "content": "The pair-wise results comparing Amazon Nova Canvas with OpenAI DALL.E 3 and Google Imagen 3 are shown in Table 9, including win, tie, loss rate. The win rate reflects the percentage of samples where Amazon Nova Canvas was" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "spans": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "text", + "content": "The Amazon Nova Family of Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 81, + 711, + 369, + 722 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 711, + 369, + 722 + ], + "spans": [ + { + "bbox": [ + 81, + 711, + 369, + 722 + ], + "type": "inline_equation", + "content": "^{6}" + }, + { + "bbox": [ + 81, + 711, + 369, + 722 + ], + "type": "text", + "content": " https://www.amazon.science/blog/amazon-nova-canvas-examples" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 298, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 298, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 298, + 741, + 309, + 750 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 72, + 541, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 541, + 106 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 541, + 106 + ], + "type": "text", + "content": "preferred over the other model while the tie rate indicates the scenario where the human annotator did not perceive a difference between the two models. As can be seen in the results, Amazon Nova Canvas has a higher win rate compared to the other text-to-image models." + } + ] + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 77, + 115, + 533, + 180 + ], + "blocks": [ + { + "bbox": [ + 77, + 115, + 533, + 180 + ], + "lines": [ + { + "bbox": [ + 77, + 115, + 533, + 180 + ], + "spans": [ + { + "bbox": [ + 77, + 115, + 533, + 180 + ], + "type": "table", + "html": "
Nova Canvas versus:DALL.E 3Imagen 3
win ratetie rateloss ratewin ratetie rateloss rate
Overall preference (image quality)54.56.439.148.25.346.5
Instruction following (text-image alignment)39.422.538.138.428.133.5
", + "image_path": "24cd5ce105f84b663074971e3ad6ce5928eb157a204bca249b33984a69b84712.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 68, + 182, + 541, + 206 + ], + "lines": [ + { + "bbox": [ + 68, + 182, + 541, + 206 + ], + "spans": [ + { + "bbox": [ + 68, + 182, + 541, + 206 + ], + "type": "text", + "content": "Table 9: The win, tie, and loss rates (%) from human evaluation of Amazon Nova Canvas versus (a) DALL.E 3 and (b) Imagen 3." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 68, + 236, + 247, + 248 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 236, + 247, + 248 + ], + "spans": [ + { + "bbox": [ + 68, + 236, + 247, + 248 + ], + "type": "text", + "content": "4 Amazon Nova Reel Evaluation" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 260, + 541, + 304 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 260, + 541, + 304 + ], + "spans": [ + { + "bbox": [ + 67, + 260, + 541, + 304 + ], + "type": "text", + "content": "Amazon Nova Reel is a diffusion model that takes a text prompt and an optional RGB image as input and generates a video as an output conditioned on the input text and optional image. Illustrative examples of the videos generated by the Amazon Nova Reel can be found in our Amazon Science blog post.7 In this section, we provide details on the evaluation strategy and performance of the model." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 68, + 317, + 208, + 328 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 317, + 208, + 328 + ], + "spans": [ + { + "bbox": [ + 68, + 317, + 208, + 328 + ], + "type": "text", + "content": "4.1 Human evaluation metrics" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 337, + 542, + 393 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 337, + 542, + 393 + ], + "spans": [ + { + "bbox": [ + 67, + 337, + 542, + 393 + ], + "type": "text", + "content": "To evaluate Amazon Nova Reel, we rely on human feedback to assess the generated videos across two primary axes: video quality and video consistency. All evaluations are conducted through single-blind pairwise comparisons. Human annotators are provided a set of two videos shown side-by-side and are asked to choose the better video or mark them as equal if they find the videos to be equally performant across the metric on which they are evaluating. All videos were generated in 720p resolution and different random seeds were used during generation." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 397, + 541, + 421 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 397, + 541, + 421 + ], + "spans": [ + { + "bbox": [ + 67, + 397, + 541, + 421 + ], + "type": "text", + "content": "The video quality axis encapsulates the technical and perceptual aspects of the generated video via four primary components:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 96, + 429, + 538, + 539 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 96, + 429, + 538, + 451 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 429, + 538, + 451 + ], + "spans": [ + { + "bbox": [ + 96, + 429, + 538, + 451 + ], + "type": "text", + "content": "- Image quality: The visual appeal of individual frames, including resolution, sharpness, object clarity, and overall composition, where each frame is visually pleasing and artifact-free." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 96, + 455, + 538, + 477 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 455, + 538, + 477 + ], + "spans": [ + { + "bbox": [ + 96, + 455, + 538, + 477 + ], + "type": "text", + "content": "- Motion quality: The fluidity of movement across frames, including motion consistency and smooth transitions without flickering, distortion, or abrupt shifts, contributing to natural and realistic motion portrayal." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 96, + 481, + 538, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 481, + 538, + 502 + ], + "spans": [ + { + "bbox": [ + 96, + 481, + 538, + 502 + ], + "type": "text", + "content": "- Image-text alignment: How closely individual frames match the prompt, considering the presence of described entities, their attributes, spatial relationships, colors, and other static visual details." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 96, + 506, + 538, + 539 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 506, + 538, + 539 + ], + "spans": [ + { + "bbox": [ + 96, + 506, + 538, + 539 + ], + "type": "text", + "content": "- Motion-text alignment: The accuracy of dynamic elements, including the correctness of actions performed by entities, camera movements, and temporal changes in attributes, as well as adherence to the provided description." + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 67, + 548, + 541, + 572 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 548, + 541, + 572 + ], + "spans": [ + { + "bbox": [ + 67, + 548, + 541, + 572 + ], + "type": "text", + "content": "The video quality axis additionally includes factors influencing overall appeal, such as motion degree, entity size, creative composition, and general video likability." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 67, + 575, + 541, + 621 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 575, + 541, + 621 + ], + "spans": [ + { + "bbox": [ + 67, + 575, + 541, + 621 + ], + "type": "text", + "content": "The video consistency axis encapsulates the temporal coherence of both subjects and backgrounds throughout the video. It includes assessments of the maintenance of entity size, shape, and appearance, as well as background stability without unexpected morphing or changes. A high score in this dimension means believable spatial relationships between foreground and background elements throughout the video duration." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 67, + 625, + 541, + 649 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 625, + 541, + 649 + ], + "spans": [ + { + "bbox": [ + 67, + 625, + 541, + 649 + ], + "type": "text", + "content": "In combination, the video quality and video consistency metrics provide a holistic and robust evaluation framework for video generation models by considering both technical accuracy and perceptual appeal." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 68, + 660, + 127, + 671 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 660, + 127, + 671 + ], + "spans": [ + { + "bbox": [ + 68, + 660, + 127, + 671 + ], + "type": "text", + "content": "4.2 Dataset" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 67, + 681, + 541, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 681, + 541, + 704 + ], + "spans": [ + { + "bbox": [ + 67, + 681, + 541, + 704 + ], + "type": "text", + "content": "We curated a diverse set of prompts designed to capture various aspects of video generation. The prompts are distributed across 6 broad categories: human and activities, animals, natural scenery and landscapes, indoor scenes, objects" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "spans": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "text", + "content": "The Amazon Nova Family of Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 82, + 711, + 358, + 722 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 711, + 358, + 722 + ], + "spans": [ + { + "bbox": [ + 82, + 711, + 358, + 722 + ], + "type": "text", + "content": "7https://www.amazon.science/blog/amazon-nova-reel-examples" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 298, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 298, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 298, + 741, + 309, + 750 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 72, + 541, + 172 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 541, + 172 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 541, + 172 + ], + "type": "text", + "content": "interactions, and creative scenes and activities. This broad categorization ensures that the evaluation covers a wide range of real-world scenarios. We structured the prompt set to cover various motion-related aspects, which is critical for assessing motion-text alignment in the generated videos. For example, we included prompts with a variety of camera motions to evaluate how well the models follow instructions related to camera movement. Additionally, we incorporated dynamic attributes [71], in which the subject or background undergoes state or shape changes over time, which allows us to evaluate the model's ability to generate evolving entities. Finally, we added prompts that require motion binding [71], where specific compositions of movements and actions are requested, enabling us to assess how well models can generate complex, coordinated motions. The curated prompt set consists of approximately 700 prompts, all from various open source benchmarks." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 184, + 234, + 196 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 184, + 234, + 196 + ], + "spans": [ + { + "bbox": [ + 69, + 184, + 234, + 196 + ], + "type": "text", + "content": "4.3 Implementation details & results" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 205, + 541, + 326 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 205, + 541, + 326 + ], + "spans": [ + { + "bbox": [ + 67, + 205, + 541, + 326 + ], + "type": "text", + "content": "To ensure a rigorous, consistent and unbiased evaluation process, we outsourced the annotation collection process to a third-party vendor. We created detailed guidelines, in which annotators were given comprehensive instructions and examples for each evaluation dimension, ensuring clarity on the criteria for marking preferences between videos. These guidelines included examples of different scenarios to aid in decision-making across our evaluation axes. Alongside this, we ensured that annotators were trained using expert-provided examples, with each round of annotations subject to spot checks. Specifically, " + }, + { + "bbox": [ + 67, + 205, + 541, + 326 + ], + "type": "inline_equation", + "content": "5 - 10\\%" + }, + { + "bbox": [ + 67, + 205, + 541, + 326 + ], + "type": "text", + "content": " of the data from each batch was randomly selected and reviewed by expert annotators. Based on this feedback, the vendor continuously refined the annotators' understanding and accuracy, ensuring a high standard of evaluation across the board. To further enhance the reliability of the results, we employed a consensus voting system. For each video comparison, annotations were collected from three different evaluators, and a majority voting approach was used to determine the final outcome. This method helps reduce individual biases and ensures that the final assessments are based on collective judgment, thereby increasing the robustness of the evaluation." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 330, + 541, + 397 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 330, + 541, + 397 + ], + "spans": [ + { + "bbox": [ + 67, + 330, + 541, + 397 + ], + "type": "text", + "content": "For reporting performance, we conducted pairwise comparisons between Amazon Nova Reel and other state-of-the-art models including Gen3 Alpha [65] by Runway ML and Luma 1.6 [47] by Luma Labs. We report results in terms of win, tie, and loss rates. The win rate reflects the percentage of samples where Amazon Nova Reel was preferred over the other model, while the tie rate indicates cases where no perceptible difference between the two models was found by the evaluators. Using the curated prompt set described earlier, we evaluate the models across all the dimensions outlined above, and report the results in Table 10." + } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 129, + 406, + 482, + 470 + ], + "blocks": [ + { + "bbox": [ + 129, + 406, + 482, + 470 + ], + "lines": [ + { + "bbox": [ + 129, + 406, + 482, + 470 + ], + "spans": [ + { + "bbox": [ + 129, + 406, + 482, + 470 + ], + "type": "table", + "html": "
Nova Reel versus:Runway Gen3 AlphaLuma 1.6
win ratetie rateloss ratewin ratetie rateloss rate
Video Quality56.49.933.751.13.445.5
Video Consistency67.09.123.974.75.120.2
", + "image_path": "460ae686ce3b421bcd1418395cc5925da64a59db4ea8972139858cf04c0e9636.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 473, + 541, + 495 + ], + "lines": [ + { + "bbox": [ + 67, + 473, + 541, + 495 + ], + "spans": [ + { + "bbox": [ + 67, + 473, + 541, + 495 + ], + "type": "text", + "content": "Table 10: The win, tie, and loss rates " + }, + { + "bbox": [ + 67, + 473, + 541, + 495 + ], + "type": "inline_equation", + "content": "(\\%)" + }, + { + "bbox": [ + 67, + 473, + 541, + 495 + ], + "type": "text", + "content": " from human evaluation of Amazon Nova Reel versus (a) Gen3-Alpha and (b) Luma1.6." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 67, + 515, + 541, + 550 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 515, + 541, + 550 + ], + "spans": [ + { + "bbox": [ + 67, + 515, + 541, + 550 + ], + "type": "text", + "content": "In video consistency, Amazon Nova Reel achieved win rates of " + }, + { + "bbox": [ + 67, + 515, + 541, + 550 + ], + "type": "inline_equation", + "content": "67.0\\%" + }, + { + "bbox": [ + 67, + 515, + 541, + 550 + ], + "type": "text", + "content": " against Gen3 Alpha and " + }, + { + "bbox": [ + 67, + 515, + 541, + 550 + ], + "type": "inline_equation", + "content": "74.7\\%" + }, + { + "bbox": [ + 67, + 515, + 541, + 550 + ], + "type": "text", + "content": " against Luma 1.6, demonstrating superior subject and background coherence. For video quality, Amazon Nova Reel secured win rates of " + }, + { + "bbox": [ + 67, + 515, + 541, + 550 + ], + "type": "inline_equation", + "content": "56.4\\%" + }, + { + "bbox": [ + 67, + 515, + 541, + 550 + ], + "type": "text", + "content": " against Gen3 Alpha and " + }, + { + "bbox": [ + 67, + 515, + 541, + 550 + ], + "type": "inline_equation", + "content": "51.1\\%" + }, + { + "bbox": [ + 67, + 515, + 541, + 550 + ], + "type": "text", + "content": " against Luma 1.6." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 564, + 168, + 578 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 564, + 168, + 578 + ], + "spans": [ + { + "bbox": [ + 69, + 564, + 168, + 578 + ], + "type": "text", + "content": "5 Responsible AI" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 589, + 541, + 656 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 589, + 541, + 656 + ], + "spans": [ + { + "bbox": [ + 67, + 589, + 541, + 656 + ], + "type": "text", + "content": "Our approach to Responsible AI (RAI) is structured around eight foundational dimensions [10] shown in Table 11. These dimensions guide our approach to RAI for the Amazon Nova family of models, which we articulate in the following three sections: (1) defining our RAI design objectives, (2) our actions to ensure adherence to these objectives, and (3) system evaluation and red teaming. The last two components form a continuous loop of model development and human/automated verification to ensure that our Amazon Nova models are aligned with our RAI objectives and deliver an exceptional and delightful customer experience." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 669, + 214, + 681 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 669, + 214, + 681 + ], + "spans": [ + { + "bbox": [ + 69, + 669, + 214, + 681 + ], + "type": "text", + "content": "5.1 Defining our RAI objectives" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 67, + 689, + 541, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 689, + 541, + 723 + ], + "spans": [ + { + "bbox": [ + 67, + 689, + 541, + 723 + ], + "type": "text", + "content": "We operationalize our RAI dimensions into a series of detailed design objectives that guide our decision-making throughout the entire model development lifecycle, from initial data collection and pre-training to the implementation of post-deployment runtime mitigations." + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 227, + 34, + 378, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 227, + 34, + 378, + 45 + ], + "spans": [ + { + "bbox": [ + 227, + 34, + 378, + 45 + ], + "type": "text", + "content": "The Amazon Nova Family of Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 298, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 298, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 298, + 741, + 309, + 750 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 70, + 70, + 541, + 231 + ], + "blocks": [ + { + "bbox": [ + 70, + 70, + 541, + 231 + ], + "lines": [ + { + "bbox": [ + 70, + 70, + 541, + 231 + ], + "spans": [ + { + "bbox": [ + 70, + 70, + 541, + 231 + ], + "type": "table", + "html": "
TermDefinition
FairnessConsidering impacts on different groups of stakeholders
ExplainabilityUnderstanding and evaluating system outputs
Privacy and securityAppropriately obtaining, using, and protecting data and models
SafetyPreventing harmful system output and misuse
ControllabilityHaving mechanisms to monitor and steer AI system behavior
Veracity and robustnessAchieving correct system outputs, even with unexpected or adversarial inputs
GovernanceIncorporating best practices into the AI supply chain, including providers and deployers
TransparencyEnabling stakeholders to make informed choices about their engagement with an AI system
", + "image_path": "55f6921dfdfd6300177137ccb747563b22ab1a28a8d7fd0686e6d8523cfcaf2e.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 198, + 233, + 411, + 245 + ], + "lines": [ + { + "bbox": [ + 198, + 233, + 411, + 245 + ], + "spans": [ + { + "bbox": [ + 198, + 233, + 411, + 245 + ], + "type": "text", + "content": "Table 11: Our eight core Responsible AI dimensions" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 67, + 277, + 541, + 344 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 277, + 541, + 344 + ], + "spans": [ + { + "bbox": [ + 67, + 277, + 541, + 344 + ], + "type": "text", + "content": "In addition to being grounded on the RAI dimensions, our objectives are informed by relevant laws and regulations, voluntary frameworks, and our commitments to our customers, and they undergo an internal alignment process that includes reviews from a number of stakeholders. We will continue to iterate on these objections as we engage with external experts and participate in industry and government forums, including the Frontier Model Forum [29], Partnership on AI [5], and various forums organized by government agencies such as the National Institute of Standards and Technology (NIST) of the U.S. Department of Commerce [7]." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 359, + 541, + 448 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 359, + 541, + 448 + ], + "spans": [ + { + "bbox": [ + 67, + 359, + 541, + 448 + ], + "type": "text", + "content": "Our commitment to Responsible Scaling: As the capabilities of AI models increase (through increased training data, model size or architecture innovations), so do the potential risks that they present. We joined other technology companies in signing on to the White House's voluntary commitments on the safe, secure, and transparent development and use of foundation models [6]. Since then we have actively participated in other efforts, including the AI Safety Summits in the UK and Seoul, and we have committed to new standards like the G7 AI Hiroshima Process Code of Conduct [30] in accordance with our commitment to the US White House on ensuring Safe, Secure, and Trustworthy Development and Use of Artificial Intelligence. We also started a partnership with the Model Evaluation and Threat Research (METR) center8 to enrich our Controllability design objectives." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 464, + 257, + 477 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 464, + 257, + 477 + ], + "spans": [ + { + "bbox": [ + 69, + 464, + 257, + 477 + ], + "type": "text", + "content": "5.2 Ensuring adherence to RAI objectives" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 487, + 541, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 487, + 541, + 586 + ], + "spans": [ + { + "bbox": [ + 67, + 487, + 541, + 586 + ], + "type": "text", + "content": "We employed a number of methods to measure and ensure compliance for each of our core RAI dimensions depending on their scope (i.e., whether they apply to model output, data management or other processes). 
For the dimensions that govern model behavior (Safety, Fairness, Veracity and Robustness, Controllability, and Privacy and Security), we curated the pre-training data and we used both Supervised Fine Tuning (SFT) and Reinforcement Learning from Human Feedback (RLHF) methods to align our models. Based on the objectives for each RAI dimension, we created single- and multi-turn RAI demonstrations in multiple languages and conducted helpfulness/harmfulness studies to decide on SFT data mixes. We collected human preference data to be used as inputs to RLHF training where we also provided an RAI-specific reward model. We also identify risk areas during our offline evaluation or red teaming exercises (Section 5.4) and collect semantically similar examples to be included in future SFT and RLHF rounds." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 590, + 541, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 590, + 541, + 647 + ], + "spans": [ + { + "bbox": [ + 67, + 590, + 541, + 647 + ], + "type": "text", + "content": "In addition to the RAI model alignment, we built runtime input and output moderation models which serve as a first and last line of defense and allow us to respond more quickly to newly identified threats or gaps in model alignment. The main role of the input moderation model is to detect prompts that contain malicious, insecure or illegal material, or attempt to bypass the core model alignment (prompt injection, jailbreaking). Similarly, the output moderation ensures that the content adheres to our RAI objectives." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 651, + 541, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 651, + 541, + 696 + ], + "spans": [ + { + "bbox": [ + 67, + 651, + 541, + 696 + ], + "type": "text", + "content": "We have a rigorous Governance methodology, developing our models in a working-backwards product process that incorporates RAI at the design phase, design consultations and implementation assessments by dedicated RAI science and data experts, and includes routine testing, reviews with customers, best practice development, dissemination, and training." 
+ } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "spans": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "text", + "content": "The Amazon Nova Family of Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 82, + 710, + 168, + 722 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 710, + 168, + 722 + ], + "spans": [ + { + "bbox": [ + 82, + 710, + 168, + 722 + ], + "type": "text", + "content": "8https://metr.org/" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 298, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 298, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 298, + 741, + 309, + 750 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 72, + 541, + 138 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 541, + 138 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 541, + 138 + ], + "type": "text", + "content": "We work to ensure that our Privacy and Security objectives are adhered to for both the model and training data. In addition to the model output alignment described above, we take measures that include data access controls [9] protecting our model training data, resulting weights, and model versions, and watermarking model outputs (see below). We address the latter through several layers of defense, including de-identifying or removing certain types of personal data from our training data, when feasible, as well as evaluation through red teaming exercises that cover data privacy assessments." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 143, + 541, + 201 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 143, + 541, + 201 + ], + "spans": [ + { + "bbox": [ + 67, + 143, + 541, + 201 + ], + "type": "text", + "content": "For Explainability of our models' outputs we conduct and leverage the current active research in the area of Explainable AI to deeply understand our models' current behavior, their potential future behavior, and to build capabilities to continuously correct their behavior as and when necessary. We use various explainable AI methods throughout our model development to guide our decisions regarding RAI alignment and other mitigations. Services like Clarify [8] also enable our downstream developers to easily explain model predictions." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 203, + 541, + 260 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 203, + 541, + 260 + ], + "spans": [ + { + "bbox": [ + 67, + 203, + 541, + 260 + ], + "type": "text", + "content": "To work to ensure our models' Robustness against adversarial inputs such as those that attempt to bypass alignment guardrails, we focused on risks applicable to both developers building applications using our models, and users interacting with our models via those applications. We organized those risks in broad categories such as sensitive data exfiltration, execution of unauthorized action, degradation of run-time model service availability, and malicious content generation. We used this risk organization to build model resiliency against interactions that lead to the prioritized risks." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 263, + 541, + 352 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 263, + 541, + 352 + ], + "spans": [ + { + "bbox": [ + 67, + 263, + 541, + 352 + ], + "type": "text", + "content": "Finally, to maximize Transparency, we incorporate an invisible watermark during the image or video generation process and add " + }, + { + "bbox": [ + 67, + 263, + 541, + 352 + ], + "type": "inline_equation", + "content": "\\mathrm{C2PA}^9" + }, + { + "bbox": [ + 67, + 263, + 541, + 352 + ], + "type": "text", + "content": " metadata in all Canvas generated content. We enhanced the robustness to alterations like rotation, resizing, color inversion, and flipping. For videos, we embed our watermark in each frame and ensure that our watermarking and detection methods withstand H264 compression. To enable anyone to easily detect the watermarks in Amazon Nova generated content, an API will be available soon after launch. Our watermark detection system introduces several enhancements such as making confidence score-based predictions instead of a single binary prediction that reflects the extent to which the generated content has been edited even when using external tools. The new detection system covers both images and videos." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 365, + 162, + 376 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 365, + 162, + 376 + ], + "spans": [ + { + "bbox": [ + 69, + 365, + 162, + 376 + ], + "type": "text", + "content": "5.3 RAI Evaluation" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 384, + 541, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 384, + 541, + 483 + ], + "spans": [ + { + "bbox": [ + 67, + 384, + 541, + 483 + ], + "type": "text", + "content": "Throughout model development we perform extensive RAI evaluations using publicly available benchmarks like BOLD [25], RealToxicityPrompts [31], and MM-SafetyBench [44]. We also built a series of proprietary, dynamically updating benchmarks. To build them, our internal data annotation team created a diverse set of examples for each of our RAI dimensions. In addition, we leveraged subject-matter experts in specific areas, such as Security and Controllability, to collect adversarial prompts. We continued updating and enhancing each dataset based on evaluation and red teaming results (see Section 5.4 for more details on red teaming). This kept the internal benchmarks evergreen, avoiding overfitting during development, but also made sure the models do not regress against previously identified risks. Our datasets comprise inputs in multiple languages and multiple modalities, and contain single-turn and multi-turn conversation examples." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 497, + 151, + 509 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 497, + 151, + 509 + ], + "spans": [ + { + "bbox": [ + 69, + 497, + 151, + 509 + ], + "type": "text", + "content": "5.4 Red Teaming" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 517, + 541, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 517, + 541, + 563 + ], + "spans": [ + { + "bbox": [ + 67, + 517, + 541, + 563 + ], + "type": "text", + "content": "Static benchmarks give us a view of how well models perform per RAI dimension against a user's \"plain\" intent (i.e. the prompts explicitly state the intent of the user to generate prohibited content). 
To test our models' resilience against techniques that mask the users' intent, we rely on red teaming. We employed a multi-pronged evaluation strategy consisting of internal red teaming, red teaming with third-party and subject-matter experts, and automated red teaming." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 574, + 197, + 586 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 574, + 197, + 586 + ], + "spans": [ + { + "bbox": [ + 69, + 574, + 197, + 586 + ], + "type": "text", + "content": "5.4.1 Internal Red Teaming" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 593, + 541, + 703 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 593, + 541, + 703 + ], + "spans": [ + { + "bbox": [ + 67, + 593, + 541, + 703 + ], + "type": "text", + "content": "We used a team of trained data analysts and subject-matter experts to perform regular red teaming exercises to evaluate the model's robustness against adversarial prompts across all our RAI dimensions. We enhanced the diversity of manually curated adversarial prompts by employing linguistic, structural, and modality-based prompt mutation techniques, assessing each mutation for its effectiveness at generating a response that does not adhere to our RAI objectives, the likelihood of its success, and the technique's novelty to a model revision. In total, we identified and developed over 300 distinct techniques (see Figure 4), and tested techniques individually and via chaining various combinations. The attacks covered multiple languages and modalities, targeting each language/modality individually and in combination. We designed cross-modality attacks, such as embedding adversarial content within seemingly benign visual inputs, to evaluate the models' ability to handle complex scenarios involving multiple input types. Where appropriate, we implemented automation to further improve the diversity, reliability, and efficiency of red teaming."
+ } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "spans": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "text", + "content": "The Amazon Nova Family of Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 82, + 710, + 168, + 723 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 710, + 168, + 723 + ], + "spans": [ + { + "bbox": [ + 82, + 710, + 168, + 723 + ], + "type": "text", + "content": "9https://c2pa.org/" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 298, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 298, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 298, + 741, + 309, + 750 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 96, + 71, + 517, + 340 + ], + "blocks": [ + { + "bbox": [ + 96, + 71, + 517, + 340 + ], + "lines": [ + { + "bbox": [ + 96, + 71, + 517, + 340 + ], + "spans": [ + { + "bbox": [ + 96, + 71, + 517, + 340 + ], + "type": "image", + "image_path": "744fe999753b87a0d695ae52fa9855a52df774d240e85120dba86e378b9958a8.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 113, + 349, + 496, + 361 + ], + "lines": [ + { + "bbox": [ + 113, + 349, + 496, + 361 + ], + "spans": [ + { + "bbox": [ + 113, + 349, + 496, + 361 + ], + "type": "text", + "content": "Figure 4: Broad taxonomy and count of attack techniques we use for our red teaming exercises" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 68, + 388, + 541, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 388, + 541, + 411 + ], + "spans": [ + { + "bbox": [ + 68, + 388, + 541, + 411 + ], + "type": "text", + "content": "After each round of red teaming, we gathered feedback from the team regarding failure patterns which guided the next stage of the model development." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 427, + 198, + 440 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 427, + 198, + 440 + ], + "spans": [ + { + "bbox": [ + 69, + 427, + 198, + 440 + ], + "type": "text", + "content": "5.4.2 External Red Teaming" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 68, + 449, + 541, + 602 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 449, + 541, + 602 + ], + "spans": [ + { + "bbox": [ + 68, + 449, + 541, + 602 + ], + "type": "text", + "content": "In accordance with our commitment to the US White House on ensuring Safe, Secure, and Trustworthy Artificial Intelligence, we partner with a variety of third parties to conduct red teaming against our AI models. These initiatives are in addition to our extensive in-house efforts, which includes all aspects of Cybersecurity red teaming. Just like with our internal red teaming efforts, we iterated during the model development based on feedback from these institutions to improve the RAI adherence of our models. We leverage red-teaming firms including ActiveFence to conduct testing in areas such as hate speech, political misinformation, extremism and other RAI dimensions. 
We also work with specialized third parties to red team our models for Chemical, Biological, Radiological and Nuclear (CBRN) capabilities. Our work with Deloitte Consulting tests our AI models' capabilities in Biological risks and harms. Our work with Nemesys Insights LLC tests our AI models' capabilities in the Radiological and Nuclear domains. We also work with the Gomes Group at Carnegie Mellon University to test our models' capabilities in Chemistry and chemical compounds. Each of these partners was carefully selected based on their industry leadership, previous/parallel red teaming work with other AI model developers, and their contributions to evolving government and industry standards around CBRN and overall AI safety. We provide a brief summary of the expertise of each of these vendors and their testing methodology below." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 68, + 607, + 541, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 607, + 541, + 662 + ], + "spans": [ + { + "bbox": [ + 68, + 607, + 541, + 662 + ], + "type": "text", + "content": "ActiveFence: ActiveFence is a team of over 150 subject matter experts providing AI Safety and Content Moderation solutions. The team produced over 9,700 adversarial prompts, distributed over 20 categories, including content-targeted red teaming (evaluating the model's ability to generate harmful or inappropriate content), and security-targeted red teaming (assessing the model's resilience against malicious attempts to manipulate its behavior or extract sensitive information)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 68, + 667, + 541, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 667, + 541, + 723 + ], + "spans": [ + { + "bbox": [ + 68, + 667, + 541, + 723 + ], + "type": "text", + "content": "Deloitte: The evaluation team at Deloitte Consulting LLP (formerly known as Gryphon Scientific) has unique experience at the intersection of artificial intelligence and biology. The primary thrust of this effort involved evaluating the model against a panel of 30 questions developed to test an LLM's scientific knowledge and reasoning capabilities that could facilitate the development or use of biological weapons. The model's responses to these questions were evaluated for their scientific accuracy and utility to someone seeking to do harm with biology. After completing the initial" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "spans": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "text", + "content": "The Amazon Nova Family of Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 297, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 297, + 741, + 309, + 750 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 72, + 541, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 541, + 95 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 541, + 95 + ], + "type": "text", + "content": "evaluations, the Deloitte team probed more deeply into the questions to which the LLM originally replied with potentially concerning information."
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 100, + 541, + 189 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 100, + 541, + 189 + ], + "spans": [ + { + "bbox": [ + 67, + 100, + 541, + 189 + ], + "type": "text", + "content": "Gomes Group: The Gomes Group at Carnegie Mellon University is at the forefront of integrating advanced artificial intelligence into chemical research. Their evaluation framework consisted of both automated and non-automated assessments. Two non-automated evaluations explored aggregation attack vulnerabilities through purchasing and remote chemical mixing scenarios. The automated evaluations utilized two distinct datasets: one containing 39 hazardous chemicals (including DEA Schedule I, II, and chemical warfare agents) and another with 362 common chemicals for NFPA diamond classifications. Three primary automated evaluations were conducted using the hazardous chemicals dataset. The NFPA diamond evaluation comprised 1,810 prompts, testing both single-turn and multi-turn approaches with consistent accuracy across both methods." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 192, + 541, + 281 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 192, + 541, + 281 + ], + "spans": [ + { + "bbox": [ + 67, + 192, + 541, + 281 + ], + "type": "text", + "content": "Nemesys: Nemesys Insights LLC run uplift studies, red teaming exercises, and risk assessments for a variety of technology companies and third-party research entities to assess national security related risks of large language models and other generative AI tools. For their testing, they started with human red teaming exercises focused on non-state acquisition or use of illicit radiological/nuclear (RN) materials, followed by prompt-response evaluation and uplift studies. The exercises comprised two different scenarios (a. violent non-state actor acquisition and use of Cobalt-60; b. non-state actor acquisition and international transport of HEU [highly enriched uranium]), and utilized 8 subject matter experts with operational and technological knowledge in a 2-team x 2-scenario design to construct and refine threat plans across a 6-hour planning cycle." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 292, + 209, + 304 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 292, + 209, + 304 + ], + "spans": [ + { + "bbox": [ + 69, + 292, + 209, + 304 + ], + "type": "text", + "content": "5.4.3 Automated Red Teaming" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 310, + 541, + 431 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 310, + 541, + 431 + ], + "spans": [ + { + "bbox": [ + 67, + 310, + 541, + 431 + ], + "type": "text", + "content": "Finally, to augment human based red teaming, we built an automated red teaming mechanism by adapting our (Feedback Loop In-context Red Teaming) FLIRT [52] framework. This approach helped us scale red teaming and repeat red teaming efficiently. FLIRT uses a list of seed prompts that have been identified by human evaluators as potentially violating one or more of our RAI dimensions. For every dimension, a subset of seeds is used to generate additional prompts with a dedicated language model, called red-LM, through in-context-learning (ICL) [18] and a carefully crafted set of instructions. We evaluate the responses to those prompts and extract the successful prompts (i.e., the ones triggering a prohibited response) for the next round of generation. 
The above steps are repeated for a chosen number of iterations across all RAI categories. We use our automated red teaming mechanism to evaluate both RAI adherence robustness and false refusals. We use the mechanism to generate adversarial tests across multi-turn interactions, multiple languages, and multiple input/output modalities to uncover and correct robustness issues in our models due to potential adversarial content in such interactions and inputs." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 446, + 212, + 460 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 446, + 212, + 460 + ], + "spans": [ + { + "bbox": [ + 69, + 446, + 212, + 460 + ], + "type": "text", + "content": "6 Training Infrastructure" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 471, + 541, + 570 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 471, + 541, + 570 + ], + "spans": [ + { + "bbox": [ + 67, + 471, + 541, + 570 + ], + "type": "text", + "content": "The Nova family of models was trained on Amazon's custom Trainium1 (TRN1) chips," + }, + { + "bbox": [ + 67, + 471, + 541, + 570 + ], + "type": "inline_equation", + "content": "^{10}" + }, + { + "bbox": [ + 67, + 471, + 541, + 570 + ], + "type": "text", + "content": " NVidia A100 (P4d instances), and H100 (P5 instances) accelerators. Working with AWS SageMaker, we stood up NVidia GPU and TRN1 clusters and ran parallel training runs to ensure model performance parity, while optimizing training throughput on the different stacks. All clusters utilize petabit-scale non-blocking EFA network fabric, which is less prone to packet loss than other network transport protocols" +  }, + { + "bbox": [ + 67, + 471, + 541, + 570 + ], + "type": "inline_equation", + "content": "^{11}" + }, + { + "bbox": [ + 67, + 471, + 541, + 570 + ], + "type": "text", + "content": " and provides the highest network bandwidth with H100 accelerators compared to any other instance type available on AWS EC2" + }, + { + "bbox": [ + 67, + 471, + 541, + 570 + ], + "type": "inline_equation", + "content": "^{12}" + }, + { + "bbox": [ + 67, + 471, + 541, + 570 + ], + "type": "text", + "content": ". We conducted distributed training on AWS SageMaker-managed Elastic Kubernetes Service (EKS) clusters, and utilized AWS File System X (FSx) and Simple Storage Service (S3) for data and checkpoint IO. While FSx offers performant and convenient storage for large-scale training jobs, S3 allowed cost-efficient scaling to large multimodal datasets and model checkpoints." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 574, + 541, + 653 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 574, + 541, + 653 + ], + "spans": [ + { + "bbox": [ + 67, + 574, + 541, + 653 + ], + "type": "text", + "content": "Goodput achieved weekly average values of up to " + }, + { + "bbox": [ + 67, + 574, + 541, + 653 + ], + "type": "inline_equation", + "content": "97\\%" + }, + { + "bbox": [ + 67, + 574, + 541, + 653 + ], + "type": "text", + "content": " in pretraining runs through optimizations targeting lower job failure rate, minimizing checkpoint overhead, and overall reduction in the Mean Time to Restart (MTTR). This time is inclusive of the time from the last successful checkpoint before a training interruption, the time taken to restart components of the system, and the time to resume training at steady state from the checkpoint. 
Techniques such as fully distributed optimizer state and weight sharding, and the elimination of all blocking overhead associated with checkpoint persistence, resulted in a reduction of checkpointing overhead to " + }, + { + "bbox": [ + 67, + 574, + 541, + 653 + ], + "type": "inline_equation", + "content": "\\sim 1" + }, + { + "bbox": [ + 67, + 574, + 541, + 653 + ], + "type": "text", + "content": " sec on H100 clusters, and " + }, + { + "bbox": [ + 67, + 574, + 541, + 653 + ], + "type": "inline_equation", + "content": "\\sim 0.1" + }, + { + "bbox": [ + 67, + 574, + 541, + 653 + ], + "type": "text", + "content": " sec on TRN1 clusters. We beat our MTTR target of 9 minutes and achieved an average of 6.5 minutes on our TRN1 clusters by optimizing the" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "spans": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "text", + "content": "The Amazon Nova Family of Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 68, + 658, + 541, + 680 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 658, + 541, + 680 + ], + "spans": [ + { + "bbox": [ + 68, + 658, + 541, + 680 + ], + "type": "inline_equation", + "content": "^{10}" + }, + { + "bbox": [ + 68, + 658, + 541, + 680 + ], + "type": "text", + "content": "https://aws.amazon.com/blogs/aws/amazon-ec2-trn1-instances-for-high-performance-model-training-are-now-available/" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 681, + 539, + 700 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 681, + 539, + 700 + ], + "spans": [ + { + "bbox": [ + 69, + 681, + 539, + 700 + ], + "type": "text", + "content": "11https://www.amazon.science/publications/a-cloud-optimized-transport-protocol-for-elastic-and-scalable-hpc" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 69, + 700, + 539, + 721 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 700, + 539, + 721 + ], + "spans": [ + { + "bbox": [ + 69, + 700, + 539, + 721 + ], + "type": "inline_equation", + "content": "^{12}" + }, + { + "bbox": [ + 69, + 700, + 539, + 721 + ], + "type": "text", + "content": "https://aws.amazon.com/blogs/aws/new-amazon-ec2-p5-instances-powered-by-nvidia-h100-tensor-core-gpus-for-accelerating-generative-ai-and-hpc-applications/" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 297, + 741, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 741, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 297, + 741, + 308, + 750 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "bbox": [ + 70, + 72, + 541, + 138 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 72, + 541, + 138 + ], + "spans": [ + { + "bbox": [ + 70, + 72, + 541, + 138 + ], + "type": "text", + "content": "node communication initialization in the training startup process and reduced time to load checkpoints through an asynchronous observer process. This process maps each latest checkpoint file to its corresponding node in the cluster. When resuming from the checkpoint, each node only loads the checkpoint files for its corresponding rank, reducing the time taken to discover the latest checkpoint from 3 minutes to 5 seconds. 
We also cache and reuse data indices to optimize training data loading initialization time. These improvements reduced data loading initialization to 205ms per restart." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 70, + 143, + 541, + 221 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 143, + 541, + 221 + ], + "spans": [ + { + "bbox": [ + 70, + 143, + 541, + 221 + ], + "type": "text", + "content": "To increase training efficiency, we developed a new activation checkpointing scheme called Super-Selective Activation Checkpointing (SSC). SSC minimizes activation re-computation in memory-constrained environments, reducing memory consumption by " + }, + { + "bbox": [ + 70, + 143, + 541, + 221 + ], + "type": "inline_equation", + "content": "\\sim 50\\%" + }, + { + "bbox": [ + 70, + 143, + 541, + 221 + ], + "type": "text", + "content": " while adding " + }, + { + "bbox": [ + 70, + 143, + 541, + 221 + ], + "type": "inline_equation", + "content": "\\sim 2\\%" + }, + { + "bbox": [ + 70, + 143, + 541, + 221 + ], + "type": "text", + "content": " re-computation overhead compared to NVidia's Selective Checkpointing. We also identified optimizations to the default gradient reduction behavior and the default PyTorch memory allocator behavior. The default gradient reduction behavior leads to suboptimal communication overlap, and we found that the synchronous nature of the default PyTorch allocator led to stragglers in collectives, resulting in multiple stalled workers. We adjusted the gradient reduction order and frequency, allowing us to overlap the majority of data parallelism communication." + } + ] + } + ], + "index": 2 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 227, + 34, + 378, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 227, + 34, + 378, + 45 + ], + "spans": [ + { + "bbox": [ + 227, + 34, + 378, + 45 + ], + "type": "text", + "content": "The Amazon Nova Family of Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 298, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 298, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 298, + 741, + 309, + 750 + ], + "type": "text", + "content": "22" + } + ] + } + ], + "index": 3 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "bbox": [ + 70, + 71, + 128, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 71, + 128, + 83 + ], + "spans": [ + { + "bbox": [ + 70, + 71, + 128, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 89, + 541, + 721 + ], + "type": "list", + "angle": 0, + "index": 23, + "blocks": [ + { + "bbox": [ + 75, + 89, + 496, + 102 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 89, + 496, + 102 + ], + "spans": [ + { + "bbox": [ + 75, + 89, + 496, + 102 + ], + "type": "text", + "content": "[1] Efficient Batch Computing - AWS Batch - AWS, 2024. URL https://aws.amazon.com/batch/." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 75, + 108, + 460, + 121 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 108, + 460, + 121 + ], + "spans": [ + { + "bbox": [ + 75, + 108, + 460, + 121 + ], + "type": "text", + "content": "[2] Big Data Platform - Amazon EMR - AWS, 2024. URL https://aws.amazon.com/emr/."
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 75, + 127, + 541, + 148 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 127, + 541, + 148 + ], + "spans": [ + { + "bbox": [ + 75, + 127, + 541, + 148 + ], + "type": "text", + "content": "[3] AgentStudio. Gemini flash. https://computer-agents.github.io/agent-studio/, 2024. Accessed: 2024-11-29." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 75, + 156, + 541, + 222 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 156, + 541, + 222 + ], + "spans": [ + { + "bbox": [ + 75, + 156, + 541, + 222 + ], + "type": "text", + "content": "[4] P. Agrawal, S. Antoniak, E. B. Hanna, B. Bout, D. Chaplot, J. Chudnovsky, D. Costa, B. D. Monicault, S. Garg, T. Gervet, S. Ghosh, A. Héliou, P. Jacob, A. Q. Jiang, K. Khandelwal, T. Lacroix, G. Lample, D. L. Casas, T. Lavril, T. L. Scao, A. Lo, W. Marshall, L. Martin, A. Mensch, P. Muddireddy, V. Nemychnikova, M. Pellat, P. V. Platen, N. Raghuraman, B. Rozière, A. Sablayrolles, L. Saulnier, R. Sauvestre, W. Shang, R. Soletskyi, L. Stewart, P. Stock, J. Studnia, S. Subramanian, S. Vaze, T. Wang, and S. Yang. Pixtral 12B, 2024. URL https://arxiv.org/abs/2410.07073." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 75, + 229, + 539, + 252 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 229, + 539, + 252 + ], + "spans": [ + { + "bbox": [ + 75, + 229, + 539, + 252 + ], + "type": "text", + "content": "[5] Amazon. Amazon joins Partnership on AI. https://www.aboutamazon.com/news/amazon-ai/amazon-joins-partnership-on-ai, 2016. Accessed: 2024-11-20." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 75, + 259, + 539, + 281 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 259, + 539, + 281 + ], + "spans": [ + { + "bbox": [ + 75, + 259, + 539, + 281 + ], + "type": "text", + "content": "[6] Amazon. Our commitment to the responsible use of AI. https://www.aboutamazon.com/news/company-news/amazon-responsible-ai, 2023. Accessed: 2024-11-20." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 75, + 289, + 539, + 322 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 289, + 539, + 322 + ], + "spans": [ + { + "bbox": [ + 75, + 289, + 539, + 322 + ], + "type": "text", + "content": "[7] Amazon. Amazon joins US Artificial Intelligence safety institute to advance responsible AI. https://www.aboutamazon.com/news/policy-news-views/amazon-joins-us-artificial-intelligence-safety-institute-to-advance-responsible-ai, 2024. Accessed: 2024-11-20." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 75, + 329, + 541, + 350 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 329, + 541, + 350 + ], + "spans": [ + { + "bbox": [ + 75, + 329, + 541, + 350 + ], + "type": "text", + "content": "[8] Amazon. Amazon SageMaker Clarify. https://aws.amazon.com/sagemaker/clarify/, 2024. Accessed: 2024-11-20." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 75, + 357, + 541, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 357, + 541, + 380 + ], + "spans": [ + { + "bbox": [ + 75, + 357, + 541, + 380 + ], + "type": "text", + "content": "[9] Amazon. Data protection & privacy at AWS. https://aws.amazon.com/compliance/data-protection/, 2024. Accessed: 2024-11-20."
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 70, + 387, + 541, + 409 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 387, + 541, + 409 + ], + "spans": [ + { + "bbox": [ + 70, + 387, + 541, + 409 + ], + "type": "text", + "content": "[10] Amazon. Building AI responsibly at AWS. https://aws.amazon.com/ai/responsible-ai/, 2024. Accessed: 2024-11-20." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 70, + 416, + 541, + 450 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 416, + 541, + 450 + ], + "spans": [ + { + "bbox": [ + 70, + 416, + 541, + 450 + ], + "type": "text", + "content": "[11] Anthropic. The Claude 3 model family: Opus, Sonnet, Haiku. Technical report, Anthropic, 2023. URL https://www-cdn.anthropic.com/de8ba9b01c9ab7cbabf5c33b80b7bbc618857627/Model_Card_Claude_3.pdf." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 69, + 456, + 533, + 469 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 456, + 533, + 469 + ], + "spans": [ + { + "bbox": [ + 69, + 456, + 533, + 469 + ], + "type": "text", + "content": "[12] Anthropic. Claude Sonnet. https://www.anthropic.com/claude/sonnet, 2024. Accessed: 2024-11-20." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 70, + 475, + 416, + 488 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 475, + 416, + 488 + ], + "spans": [ + { + "bbox": [ + 70, + 475, + 416, + 488 + ], + "type": "text", + "content": "[13] Anthropic AI. Claude 3.5 Sonnet model card addendum. Technical report, 2024." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 70, + 494, + 539, + 517 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 494, + 539, + 517 + ], + "spans": [ + { + "bbox": [ + 70, + 494, + 539, + 517 + ], + "type": "text", + "content": "[14] Anthropic AI Team. Claude 3.5 Haiku and upgraded Claude 3.5 Sonnet, 2024. URL https://assets.anthropic.com/m/1cd9d098ac3e6467/original/Claude-3-Model-Card-October-Addendum.pdf." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 69, + 523, + 523, + 536 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 523, + 523, + 536 + ], + "spans": [ + { + "bbox": [ + 69, + 523, + 523, + 536 + ], + "type": "text", + "content": "[15] S. Arora and B. Barak. Computational complexity: a modern approach. Cambridge University Press, 2009." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 70, + 541, + 539, + 565 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 541, + 539, + 565 + ], + "spans": [ + { + "bbox": [ + 70, + 541, + 539, + 565 + ], + "type": "text", + "content": "[16] J. Betker, G. Goh, L. Jing, T. Brooks, J. Wang, L. Li, L. Ouyang, J. Zhuang, J. Lee, Y. Guo, et al. Improving image generation with better captions. Computer Science. https://cdn.openai.com/papers/dall-e-3.pdf, 2(3):8, 2023." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 69, + 571, + 494, + 583 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 571, + 494, + 583 + ], + "spans": [ + { + "bbox": [ + 69, + 571, + 494, + 583 + ], + "type": "text", + "content": "[17] Black Forest Labs. Flux models. 2024. URL https://github.com/black-forest-labs/flux." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 69, + 590, + 541, + 612 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 590, + 541, + 612 + ], + "spans": [ + { + "bbox": [ + 69, + 590, + 541, + 612 + ], + "type": "text", + "content": "[18] T. B. Brown, B. Mann, N. Ryder, M. Subbiah, J. Kaplan, P. Dhariwal, A. Neelakantan, P. Shyam, G. Sastry, A. Askell, et al. Language models are few-shot learners. arXiv preprint arXiv:2005.14165, 2020." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 69, + 619, + 541, + 642 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 619, + 541, + 642 + ], + "spans": [ + { + "bbox": [ + 69, + 619, + 541, + 642 + ], + "type": "text", + "content": "[19] M. Chen, J. Tworek, H. Jun, Q. Yuan, H. P. D. O. Pinto, J. Kaplan, H. Edwards, Y. Burda, N. Joseph, G. Brockman, et al. Evaluating large language models trained on code, 2021." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 69, + 647, + 541, + 681 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 647, + 541, + 681 + ], + "spans": [ + { + "bbox": [ + 69, + 647, + 541, + 681 + ], + "type": "text", + "content": "[20] Z. Chen, W. Chen, C. Smiley, S. Shah, I. Borova, D. Langdon, R. N. Moussa, M. I. Beane, T.-H. K. Huang, B. R. Routledge, and W. Y. Wang. FinQA: A dataset of numerical reasoning over financial data. ArXiv, abs/2109.00122, 2021. URL https://api.semanticscholar.org/CorpusID:235399966." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 69, + 689, + 541, + 721 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 689, + 541, + 721 + ], + "spans": [ + { + "bbox": [ + 69, + 689, + 541, + 721 + ], + "type": "text", + "content": "[21] J. Cho, A. Zala, and M. Bansal. DALL-eval: Probing the reasoning skills and social biases of text-to-image generation models. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 3043-3054, 2023." + } + ] + } + ], + "index": 22 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "spans": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "text", + "content": "The Amazon Nova Family of Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 297, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 297, + 741, + 309, + 750 + ], + "type": "text", + "content": "23" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 22 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 72, + 541, + 723 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 69, + 72, + 541, + 96 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 72, + 541, + 96 + ], + "spans": [ + { + "bbox": [ + 69, + 72, + 541, + 96 + ], + "type": "text", + "content": "[22] P. Clark, I. Cowhey, O. Etzioni, T. Khot, A. Sabharwal, C. Schoenick, and O. Tafjord. Think you have solved question answering? Try ARC, the AI2 reasoning challenge. arXiv:1803.05457v1, 2018."
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 102, + 541, + 136 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 102, + 541, + 136 + ], + "spans": [ + { + "bbox": [ + 69, + 102, + 541, + 136 + ], + "type": "text", + "content": "[23] K. Cobbe, V. Kosaraju, M. Bavarian, M. Chen, H. Jun, L. Kaiser, M. Plappert, J. Tworek, J. Hilton, R. Nakano, C. Hesse, and J. Schulman. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168, 2021." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 143, + 541, + 168 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 143, + 541, + 168 + ], + "spans": [ + { + "bbox": [ + 69, + 143, + 541, + 168 + ], + "type": "text", + "content": "[24] X. Deng, Y. Gu, B. Zheng, S. Chen, S. Stevens, B. Wang, H. Sun, and Y. Su. Mind2Web: Towards a generalist agent for the web. In NeurIPS, 2023." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 174, + 541, + 229 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 174, + 541, + 229 + ], + "spans": [ + { + "bbox": [ + 69, + 174, + 541, + 229 + ], + "type": "text", + "content": "[25] J. Dhamala, T. Sun, V. Kumar, S. Krishna, Y. Pruksachatkun, K.-W. Chang, and R. Gupta. BOLD: Dataset and metrics for measuring biases in open-ended language generation. In Proceedings of the 2021 ACM Conference on Fairness, Accountability, and Transparency, FAccT '21, pages 862-872, New York, NY, USA, 2021. Association for Computing Machinery. ISBN 9781450383097. doi: 10.1145/3442188.3445924. URL https://doi.org/10.1145/3442188.3445924." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 236, + 541, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 236, + 541, + 262 + ], + "spans": [ + { + "bbox": [ + 69, + 236, + 541, + 262 + ], + "type": "text", + "content": "[26] D. Dua, Y. Wang, P. Dasigi, G. Stanovsky, S. Singh, and M. Gardner. DROP: A reading comprehension benchmark requiring discrete reasoning over paragraphs. In Proc. of NAACL, 2019." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 267, + 541, + 303 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 267, + 541, + 303 + ], + "spans": [ + { + "bbox": [ + 69, + 267, + 541, + 303 + ], + "type": "text", + "content": "[27] P. Esser, S. Kulal, A. Blattmann, R. Entezari, J. Müller, H. Saini, Y. Levi, D. Lorenz, A. Sauer, F. Boesel, et al. Scaling rectified flow transformers for high-resolution image synthesis. In Forty-first International Conference on Machine Learning, 2024. URL https://huggingface.co/stabilityai/stable-diffusion-3-medium." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 308, + 541, + 334 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 308, + 541, + 334 + ], + "spans": [ + { + "bbox": [ + 69, + 308, + 541, + 334 + ], + "type": "text", + "content": "[28] P. Esser, S. Kulal, A. Blattmann, R. Entezari, J. Müller, H. Saini, Y. Levi, D. Lorenz, A. Sauer, F. Boesel, et al. Stable Diffusion 3.5. 2024. URL https://stability.ai/news/introducing-stable-diffusion-3-5." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 339, + 541, + 373 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 339, + 541, + 373 + ], + "spans": [ + { + "bbox": [ + 69, + 339, + 541, + 373 + ], + "type": "text", + "content": "[29] Frontier Model Forum. Amazon and Meta join the Frontier Model Forum to promote AI safety.
https://www.frontiermodelforum.org/updates/amazon-and-meta-join-the-frontier-model-forum-to-promote-ai-safety/, 2024. Accessed: 2024-11-20." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 380, + 541, + 406 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 380, + 541, + 406 + ], + "spans": [ + { + "bbox": [ + 69, + 380, + 541, + 406 + ], + "type": "text", + "content": "[30] G7 Hiroshima Summit. Hiroshima process international code of conduct for organizations developing advanced AI systems. https://www.mofa.go.jp/files/100573473.pdf, 2023. Accessed: 2024-11-20." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 411, + 541, + 468 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 411, + 541, + 468 + ], + "spans": [ + { + "bbox": [ + 69, + 411, + 541, + 468 + ], + "type": "text", + "content": "[31] S. Gehman, S. Gururangan, M. Sap, Y. Choi, and N. A. Smith. RealToxicityPrompts: Evaluating neural toxic degeneration in language models. In T. Cohn, Y. He, and Y. Liu, editors, Findings of the Association for Computational Linguistics: EMNLP 2020, pages 3356-3369, Online, Nov. 2020. Association for Computational Linguistics. doi: 10.18653/v1/2020.findings-emnlp.301. URL https://aclanthology.org/2020.findings-emnlp.301." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 69, + 475, + 541, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 475, + 541, + 498 + ], + "spans": [ + { + "bbox": [ + 69, + 475, + 541, + 498 + ], + "type": "text", + "content": "[32] Gemini Team. Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context, 2024. URL https://arxiv.org/abs/2403.05530." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 69, + 505, + 541, + 529 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 505, + 541, + 529 + ], + "spans": [ + { + "bbox": [ + 69, + 505, + 541, + 529 + ], + "type": "text", + "content": "[33] Google DeepMind. Gemini Flash. https://deepmind.google/technologies/gemini/flash/, 2024. Accessed: 2024-11-20." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 69, + 535, + 541, + 559 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 535, + 541, + 559 + ], + "spans": [ + { + "bbox": [ + 69, + 535, + 541, + 559 + ], + "type": "text", + "content": "[34] N. Goyal, C. Gao, V. Chaudhary, P.-J. Chen, G. Wenzek, D. Ju, S. Krishnan, M. Ranzato, F. Guzmán, and A. Fan. The FLORES-101 evaluation benchmark for low-resource and multilingual machine translation. 2021." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 69, + 566, + 541, + 590 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 566, + 541, + 590 + ], + "spans": [ + { + "bbox": [ + 69, + 566, + 541, + 590 + ], + "type": "text", + "content": "[35] F. Guzmán, P.-J. Chen, M. Ott, J. Pino, G. Lample, P. Koehn, V. Chaudhary, and M. Ranzato. Two new evaluation datasets for low-resource machine translation: Nepali-English and Sinhala-English. 2019." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 69, + 596, + 541, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 596, + 541, + 632 + ], + "spans": [ + { + "bbox": [ + 69, + 596, + 541, + 632 + ], + "type": "text", + "content": "[36] D. Hendrycks, C. Burns, S. Basart, A. Zou, M. Mazeika, D. Song, and J. Steinhardt. Measuring massive multitask language understanding. In International Conference on Learning Representations, 2021.
URL https://openreview.net/forum?id=d7KBjmI3GmQ." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 69, + 638, + 541, + 662 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 638, + 541, + 662 + ], + "spans": [ + { + "bbox": [ + 69, + 638, + 541, + 662 + ], + "type": "text", + "content": "[37] D. Hendrycks, C. Burns, S. Kadavath, A. Arora, S. Basart, E. Tang, D. Song, and J. Steinhardt. Measuring mathematical problem solving with the MATH dataset. NeurIPS, 2021." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 69, + 668, + 541, + 703 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 668, + 541, + 703 + ], + "spans": [ + { + "bbox": [ + 69, + 668, + 541, + 703 + ], + "type": "text", + "content": "[38] Y. Hu, B. Liu, J. Kasai, Y. Wang, M. Ostendorf, R. Krishna, and N. A. Smith. TIFA: Accurate and interpretable text-to-image faithfulness evaluation with question answering. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 20406-20417, 2023." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 69, + 709, + 541, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 709, + 541, + 723 + ], + "spans": [ + { + "bbox": [ + 69, + 709, + 541, + 723 + ], + "type": "text", + "content": "[39] R. Islam and O. M. Moushi. GPT-4o: The cutting-edge advancement in multimodal LLM. Technical report, 2024." + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "spans": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "text", + "content": "The Amazon Nova Family of Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 297, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 297, + 741, + 309, + 750 + ], + "type": "text", + "content": "24" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 23 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 72, + 541, + 723 + ], + "type": "list", + "angle": 0, + "index": 21, + "blocks": [ + { + "bbox": [ + 70, + 72, + 541, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 72, + 541, + 95 + ], + "spans": [ + { + "bbox": [ + 70, + 72, + 541, + 95 + ], + "type": "text", + "content": "[40] G. Kamradt. LLMTest_NeedleInAHaystack, 2023. URL https://github.com/gkamradt/LLMTest_NeedleInAHaystack/blob/main/README.md." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 70, + 102, + 541, + 125 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 102, + 541, + 125 + ], + "spans": [ + { + "bbox": [ + 70, + 102, + 541, + 125 + ], + "type": "text", + "content": "[41] D. P. Kingma. Auto-encoding variational Bayes. 2nd International Conference on Learning Representations, ICLR, 2014." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 133, + 541, + 168 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 133, + 541, + 168 + ], + "spans": [ + { + "bbox": [ + 70, + 133, + 541, + 168 + ], + "type": "text", + "content": "[42] T.-Y. Lin, M. Maire, S. Belongie, J. Hays, P. Perona, D. Ramanan, P. Dollár, and C. L. Zitnick. Microsoft COCO: Common objects in context.
In Computer Vision-ECCV 2014: 13th European Conference, Zurich, Switzerland, September 6-12, 2014, Proceedings, Part V 13, pages 740-755. Springer, 2014." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 175, + 541, + 198 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 175, + 541, + 198 + ], + "spans": [ + { + "bbox": [ + 69, + 175, + 541, + 198 + ], + "type": "text", + "content": "[43] J. Liu, Y. Song, B. Y. Lin, W. Lam, G. Neubig, Y. Li, and X. Yue. VisualWebBench: How far have multimodal llms evolved in web page understanding and grounding?, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 205, + 541, + 250 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 205, + 541, + 250 + ], + "spans": [ + { + "bbox": [ + 70, + 205, + 541, + 250 + ], + "type": "text", + "content": "[44] X. Liu, Y. Zhu, J. Gu, Y. Lan, C. Yang, and Y. Qiao. MM-SafetyBench: A benchmark for safety evaluation of multimodal large language models. In A. Leonardis, E. Ricci, S. Roth, O. Russakovsky, T. Sattler, and G. Varol, editors, Computer Vision – ECCV 2024, pages 386–403, Cham, 2025. Springer Nature Switzerland. ISBN 978-3-031-72992-8." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 258, + 541, + 281 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 258, + 541, + 281 + ], + "spans": [ + { + "bbox": [ + 70, + 258, + 541, + 281 + ], + "type": "text", + "content": "[45] Llama Team, AI Meta. The Llama 3 herd of models, 2024. URL https://github.com/meta-llama/llama-models/blob/main/models/llama3_1/MODEL_CARD.md." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 289, + 541, + 323 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 289, + 541, + 323 + ], + "spans": [ + { + "bbox": [ + 69, + 289, + 541, + 323 + ], + "type": "text", + "content": "[46] P. Lu, B. Peng, H. Cheng, M. Galley, K.-W. Chang, Y. N. Wu, S.-C. Zhu, and J. Gao. Chameleon: Plug-and-play compositional reasoning with large language models. In The 37th Conference on Neural Information Processing Systems (NeurIPS), 2023." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 331, + 369, + 344 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 331, + 369, + 344 + ], + "spans": [ + { + "bbox": [ + 70, + 331, + 369, + 344 + ], + "type": "text", + "content": "[47] Luma Labs, 2024. URL https://lumalabs.ai/dream-machine." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 70, + 350, + 541, + 373 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 350, + 541, + 373 + ], + "spans": [ + { + "bbox": [ + 70, + 350, + 541, + 373 + ], + "type": "text", + "content": "[48] L. Madaan, A. K. Singh, R. Schaeffer, A. Poulton, S. Koyejo, P. Stenetorp, S. Narang, and D. Hupkes. Quantifying variance in evaluation benchmarks, 2024. URL https://arxiv.org/abs/2406.10229." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 70, + 380, + 541, + 404 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 380, + 541, + 404 + ], + "spans": [ + { + "bbox": [ + 70, + 380, + 541, + 404 + ], + "type": "text", + "content": "[49] K. Mangalam, R. Akshulakov, and J. Malik. EgoSchema: A diagnostic benchmark for very long-form video language understanding. In NeurIPS, 2023." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 69, + 411, + 541, + 435 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 411, + 541, + 435 + ], + "spans": [ + { + "bbox": [ + 69, + 411, + 541, + 435 + ], + "type": "text", + "content": "[50] A. Masry, D. X. Long, J. Q. Tan, S. Joty, and E. Hoque. ChartQA: A benchmark for question answering about charts with visual and logical reasoning. In ACL Findings, 2022." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 69, + 442, + 538, + 455 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 442, + 538, + 455 + ], + "spans": [ + { + "bbox": [ + 69, + 442, + 538, + 455 + ], + "type": "text", + "content": "[51] M. Mathew, D. Karatzas, and C. Jawahar. DocVQA: A dataset for VQA on document images. In WACV, 2021." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 70, + 462, + 541, + 497 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 462, + 541, + 497 + ], + "spans": [ + { + "bbox": [ + 70, + 462, + 541, + 497 + ], + "type": "text", + "content": "[52] N. Mehrabi, P. Goyal, C. Dupuy, Q. Hu, S. Ghosh, R. Zemel, K.-W. Chang, A. Galstyan, and R. Gupta. FLIRT: Feedback loop in-context red teaming. In EMNLP 2024, 2024. URL https://www.amazon.science/publications/flirt-feedback-loop-in-context-red-teaming." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 70, + 503, + 541, + 537 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 503, + 541, + 537 + ], + "spans": [ + { + "bbox": [ + 70, + 503, + 541, + 537 + ], + "type": "text", + "content": "[53] Meta. Llama 3.2 Github model card vision. https://github.com/meta-llama/llama-models/blob/main/models/llama3_2/MODEL_CARD_VISION.md#instruction-tuned-models, 2024. Accessed: 2024-11-20." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 69, + 545, + 541, + 569 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 545, + 541, + 569 + ], + "spans": [ + { + "bbox": [ + 69, + 545, + 541, + 569 + ], + "type": "text", + "content": "[54] Y. Onoe, S. Rane, Z. Berger, Y. Bitton, J. Cho, R. Garg, A. Ku, Z. Parekh, J. Pont-Tuset, G. Tanzer, et al. DOCCI: Descriptions of connected and contrasting images. URL https://arxiv.org/abs/2404.19753." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 69, + 576, + 541, + 600 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 576, + 541, + 600 + ], + "spans": [ + { + "bbox": [ + 69, + 576, + 541, + 600 + ], + "type": "text", + "content": "[55] OpenAI. GPT 4o mini. https://openai.com/index/gpt-4o-mini-advancing-cost-efficient-intelligence, 2024. Accessed: 2024-11-20." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 69, + 607, + 514, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 607, + 514, + 620 + ], + "spans": [ + { + "bbox": [ + 69, + 607, + 514, + 620 + ], + "type": "text", + "content": "[56] OpenAI. Hello GPT 4o. https://openai.com/index/hello-gpt-4o, 2024. Accessed: 2024-11-20." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 69, + 627, + 488, + 639 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 627, + 488, + 639 + ], + "spans": [ + { + "bbox": [ + 69, + 627, + 488, + 639 + ], + "type": "text", + "content": "[57] OpenAI Team. simple evals GPT4, 2024. URL https://github.com/openai/simple-evals." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 69, + 647, + 541, + 670 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 647, + 541, + 670 + ], + "spans": [ + { + "bbox": [ + 69, + 647, + 541, + 670 + ], + "type": "text", + "content": "[58] OpenAI Team. o1 mini system card, 2024. URL https://cdn.openai.com/o1-system-card-20240917.pdf." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 69, + 677, + 541, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 677, + 541, + 723 + ], + "spans": [ + { + "bbox": [ + 69, + 677, + 541, + 723 + ], + "type": "text", + "content": "[59] L. Ouyang, J. Wu, X. Jiang, D. Almeida, C. Wainwright, P. Mishkin, C. Zhang, S. Agarwal, K. Slama, A. Ray, J. Schulman, J. Hilton, F. Kelton, L. Miller, M. Simens, A. Askell, P. Welinder, P. F. Christiano, J. Leike, and R. Lowe. Training language models to follow instructions with human feedback. In Advances in Neural Information Processing Systems, volume 35, pages 27730-27744, 2022." + } + ] + } + ], + "index": 20 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "spans": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "text", + "content": "The Amazon Nova Family of Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 297, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 297, + 741, + 309, + 750 + ], + "type": "text", + "content": "25" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 24 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 72, + 541, + 723 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 70, + 72, + 541, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 72, + 541, + 95 + ], + "spans": [ + { + "bbox": [ + 70, + 72, + 541, + 95 + ], + "type": "text", + "content": "[60] S. G. Patil, T. Zhang, X. Wang, and J. E. Gonzalez. Gorilla: Large language model connected with massive APIs, 2023. URL https://arxiv.org/abs/2305.15334." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 70, + 100, + 431, + 114 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 100, + 431, + 114 + ], + "spans": [ + { + "bbox": [ + 70, + 100, + 431, + 114 + ], + "type": "text", + "content": "[61] W. Peebles and S. Xie. Scalable diffusion models with transformers. In ICCV, 2023." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 118, + 541, + 153 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 118, + 541, + 153 + ], + "spans": [ + { + "bbox": [ + 70, + 118, + 541, + 153 + ], + "type": "text", + "content": "[62] R. Rafailov, A. Sharma, E. Mitchell, C. D. Manning, S. Ermon, and C. Finn. Direct preference optimization: Your language model is secretly a reward model. In Thirty-seventh Conference on Neural Information Processing Systems, 2023." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 159, + 541, + 247 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 159, + 541, + 247 + ], + "spans": [ + { + "bbox": [ + 70, + 159, + 541, + 247 + ], + "type": "text", + "content": "[63] R. Rei, J. G. C. de Souza, D. Alves, C. Zerva, A. C. Farinha, T. Glushkova, A. Lavie, L. Coheur, and A. F. T. 
Martins. COMET-22: Unbabel-IST 2022 submission for the metrics shared task. In P. Koehn, L. Barrault, O. Bojar, F. Bougares, R. Chatterjee, M. R. Costa-jussa, C. Federmann, M. Fishel, A. Fraser, M. Freitag, Y. Graham, R. Grundkiewicz, P. Guzman, B. Haddow, M. Huck, A. Jimeno Yepes, T. Kocmi, A. Martins, M. Morishita, C. Monz, M. Nagata, T. Nakazawa, M. Negri, A. Néveol, M. Neves, M. Popel, M. Turchi, and M. Zampieri, editors, Proceedings of the Seventh Conference on Machine Translation (WMT), pages 578–585, Abu Dhabi, United Arab Emirates (Hybrid), Dec. 2022. Association for Computational Linguistics. URL https://aclanthology.org/2022.wmt-1.52." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 253, + 541, + 277 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 253, + 541, + 277 + ], + "spans": [ + { + "bbox": [ + 70, + 253, + 541, + 277 + ], + "type": "text", + "content": "[64] D. Rein, B. L. Hou, A. C. Stickland, J. Petty, R. Y. Pang, J. Dirani, J. Michael, and S. R. Bowman. GPQA: A graduate-level google-proof Q&A benchmark, 2023. URL https://arxiv.org/abs/2311.12022." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 282, + 502, + 296 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 282, + 502, + 296 + ], + "spans": [ + { + "bbox": [ + 69, + 282, + 502, + 296 + ], + "type": "text", + "content": "[65] Runway Research, 2024. URL https://runwayml.com/research/introducing-gen-3-alpha." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 300, + 541, + 335 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 300, + 541, + 335 + ], + "spans": [ + { + "bbox": [ + 70, + 300, + 541, + 335 + ], + "type": "text", + "content": "[66] C. Saharia, W. Chan, S. Saxena, L. Li, J. Whang, E. L. Denton, K. Ghasemipour, R. Gontijo Lopes, B. Karagol Ayan, T. Salimans, et al. Photorealistic text-to-image diffusion models with deep language understanding. Advances in neural information processing systems, 35:36479-36494, 2022." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 340, + 541, + 374 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 340, + 541, + 374 + ], + "spans": [ + { + "bbox": [ + 70, + 340, + 541, + 374 + ], + "type": "text", + "content": "[67] T. Schick, J. Dwivedi-Yu, R. Dessi, R. Raileanu, M. Lomeli, E. Hambro, L. Zettlemoyer, N. Cancedda, and T. Scialom. Toolformer: Language models can teach themselves to use tools. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. URL https://openreview.net/forum?id=Yacmpz84TH." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 380, + 541, + 393 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 380, + 541, + 393 + ], + "spans": [ + { + "bbox": [ + 69, + 380, + 541, + 393 + ], + "type": "text", + "content": "[68] J. Schulman, F. Wolski, P. Dhariwal, A. Radford, and O. Klimov. Proximal policy optimization algorithms, 2017." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 70, + 398, + 541, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 398, + 541, + 453 + ], + "spans": [ + { + "bbox": [ + 70, + 398, + 541, + 453 + ], + "type": "text", + "content": "[69] U. Shaham, M. Ivgi, A. Efrat, J. Berant, and O. Levy. ZeroSCROLLS: A zero-shot benchmark for long text understanding. In H. Bouamor, J. Pino, and K. 
Bali, editors, Findings of the Association for Computational Linguistics: EMNLP 2023, pages 7977-7989, Singapore, Dec. 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.findings-emnlp.536. URL https://aclanthology.org/2023.findings-emnlp.536." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 69, + 460, + 541, + 483 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 460, + 541, + 483 + ], + "spans": [ + { + "bbox": [ + 69, + 460, + 541, + 483 + ], + "type": "text", + "content": "[70] A. Singh, V. Natarajan, M. Shah, Y. Jiang, X. Chen, D. Batra, D. Parikh, and M. Rohrbach. Towards VQA models that can read. In CVPR, 2019." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 69, + 488, + 541, + 512 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 488, + 541, + 512 + ], + "spans": [ + { + "bbox": [ + 69, + 488, + 541, + 512 + ], + "type": "text", + "content": "[71] K. Sun, K. Huang, X. Liu, Y. Wu, Z. Xu, Z. Li, and X. Liu. T2V-CompBench: A comprehensive benchmark for compositional text-to-video generation. arXiv preprint arXiv:2407.14505, 2024." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 69, + 517, + 541, + 552 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 517, + 541, + 552 + ], + "spans": [ + { + "bbox": [ + 69, + 517, + 541, + 552 + ], + "type": "text", + "content": "[72] M. Suzgun, N. Scales, N. Scharli, S. Gehrmann, Y. Tay, H. W. Chung, A. Chowdhery, Q. V. Le, E. H. Chi, D. Zhou, and J. Wei. Challenging BIG-Bench tasks and whether chain-of-thought can solve them. arXiv preprint arXiv:2210.09261, 2022." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 70, + 557, + 541, + 613 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 557, + 541, + 613 + ], + "spans": [ + { + "bbox": [ + 70, + 557, + 541, + 613 + ], + "type": "text", + "content": "[73] N. Team, M. R. Costa-jussa, J. Cross, O. Celebi, M. Elbayad, K. Heafield, K. Heffernan, E. Kalbassi, J. Lam, D. Licht, J. Maillard, A. Sun, S. Wang, G. Wenzek, A. Youngblood, B. Akula, L. Barrault, G. M. Gonzalez, P. Hansanti, J. Hoffman, S. Jarrett, K. R. Sadagopan, D. Rowe, S. Spruit, C. Tran, P. Andrews, N. F. Ayan, S. Bhosale, S. Edunov, A. Fan, C. Gao, V. Goswami, F. Guzmán, P. Koehn, A. Mourachko, C. Ropers, S. Saleem, H. Schwenk, and J. Wang. No language left behind: Scaling human-centered machine translation. 2022." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 69, + 619, + 541, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 619, + 541, + 643 + ], + "spans": [ + { + "bbox": [ + 69, + 619, + 541, + 643 + ], + "type": "text", + "content": "[74] A. Vaswani, N. Shazeer, N. Parmar, J. Uszkoreit, L. Jones, A. N. Gomez, L. Kaiser, and I. Polosukhin. Attention is all you need, 2023. URL https://arxiv.org/abs/1706.03762." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 69, + 648, + 541, + 671 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 648, + 541, + 671 + ], + "spans": [ + { + "bbox": [ + 69, + 648, + 541, + 671 + ], + "type": "text", + "content": "[75] R. Vedantam, C. L. Zitnick, and D. Parikh. CIDEr: Consensus-based Image Description Evaluation. In CVPR, 2015."
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 70, + 677, + 541, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 677, + 541, + 723 + ], + "spans": [ + { + "bbox": [ + 70, + 677, + 541, + 723 + ], + "type": "text", + "content": "[76] A. Wang, R. Y. Pang, A. Chen, J. Phang, and S. R. Bowman. SQuALITY: Building a long-document summarization dataset the hard way. In Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pages 1139–1156, Abu Dhabi, United Arab Emirates, Dec. 2022. Association for Computational Linguistics. URL https://aclanthology.org/2022.emnlp-main.75." + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "spans": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "text", + "content": "The Amazon Nova Family of Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 297, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 297, + 741, + 309, + 750 + ], + "type": "text", + "content": "26" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 25 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 72, + 541, + 507 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 70, + 72, + 541, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 72, + 541, + 95 + ], + "spans": [ + { + "bbox": [ + 70, + 72, + 541, + 95 + ], + "type": "text", + "content": "[77] W. Wang, Z. He, W. Hong, Y. Cheng, X. Zhang, J. Qi, X. Gu, S. Huang, B. Xu, Y. Dong, et al. LVBench: An extreme long video understanding benchmark. arXiv preprint arXiv:2406.08035, 2024." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 70, + 101, + 541, + 125 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 101, + 541, + 125 + ], + "spans": [ + { + "bbox": [ + 70, + 101, + 541, + 125 + ], + "type": "text", + "content": "[78] X. Wang, J. Wu, J. Chen, L. Li, Y.-F. Wang, and W. Y. Wang. VATEX: A large-scale, high-quality multilingual dataset for video-and-language research. In ICCV, 2019." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 131, + 541, + 176 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 131, + 541, + 176 + ], + "spans": [ + { + "bbox": [ + 70, + 131, + 541, + 176 + ], + "type": "text", + "content": "[79] J. Wei, X. Wang, D. Schuurmans, M. Bosma, B. Ichter, F. Xia, E. H. Chi, Q. V. Le, and D. Zhou. Chain-of-thought prompting elicits reasoning in large language models. In Proceedings of the 36th International Conference on Neural Information Processing Systems, NIPS '22, Red Hook, NY, USA, 2024. Curran Associates Inc. ISBN 9781713871088." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 182, + 541, + 207 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 182, + 541, + 207 + ], + "spans": [ + { + "bbox": [ + 69, + 182, + 541, + 207 + ], + "type": "text", + "content": "[80] J. Xu, X. Liu, Y. Wu, Y. Tong, Q. Li, M. Ding, J. Tang, and Y. Dong. ImageReward: Learning and evaluating human preferences for text-to-image generation. Advances in Neural Information Processing Systems, 36, 2024." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 213, + 541, + 236 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 213, + 541, + 236 + ], + "spans": [ + { + "bbox": [ + 70, + 213, + 541, + 236 + ], + "type": "text", + "content": "[81] F. Yan, H. Mao, C. C.-J. Ji, T. Zhang, S. G. Patil, I. Stoica, and J. E. Gonzalez. Berkeley function calling leaderboard. 2024." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 242, + 541, + 288 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 242, + 541, + 288 + ], + "spans": [ + { + "bbox": [ + 70, + 242, + 541, + 288 + ], + "type": "text", + "content": "[82] X. Yang, K. Sun, H. Xin, Y. Sun, N. Bhalla, X. Chen, S. Choudhary, R. D. Gui, Z. W. Jiang, Z. Jiang, L. Kong, B. Moran, J. Wang, Y. E. Xu, A. Yan, C. Yang, E. Yuan, H. Zha, N. Tang, L. Chen, N. Scheffer, Y. Liu, N. Shah, R. Wanga, A. Kumar, W. tau Yih, and X. L. Dong. CRAG – Comprehensive RAG benchmark. arXiv preprint arXiv:2406.04744, 2024. URL https://arxiv.org/abs/2406.04744." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 293, + 541, + 318 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 293, + 541, + 318 + ], + "spans": [ + { + "bbox": [ + 70, + 293, + 541, + 318 + ], + "type": "text", + "content": "[83] S. Yao, J. Zhao, D. Yu, N. Du, I. Shafran, K. Narasimhan, and Y. Cao. ReAct: Synergizing reasoning and acting in language models. In International Conference on Learning Representations (ICLR), 2023." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 323, + 541, + 348 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 323, + 541, + 348 + ], + "spans": [ + { + "bbox": [ + 70, + 323, + 541, + 348 + ], + "type": "text", + "content": "[84] J. Yu, Y. Xu, J. Y. Koh, T. Luong, G. Baid, Z. Wang, V. Vasudevan, A. Ku, Y. Yang, B. K. Ayan, et al. Scaling autoregressive models for content-rich text-to-image generation. arXiv preprint arXiv:2206.10789, 2(3):5, 2022." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 70, + 354, + 541, + 388 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 354, + 541, + 388 + ], + "spans": [ + { + "bbox": [ + 70, + 354, + 541, + 388 + ], + "type": "text", + "content": "[85] X. Yue, Y. Ni, K. Zhang, T. Zheng, R. Liu, G. Zhang, S. Stevens, D. Jiang, W. Ren, Y. Sun, C. Wei, B. Yu, R. Yuan, R. Sun, M. Yin, B. Zheng, Z. Yang, Y. Liu, W. Huang, H. Sun, Y. Su, and W. Chen. MMMU: A massive multi-discipline multimodal understanding and reasoning benchmark for expert AGI. In CVPR, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 393, + 541, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 393, + 541, + 407 + ], + "spans": [ + { + "bbox": [ + 69, + 393, + 541, + 407 + ], + "type": "text", + "content": "[86] B. Zheng, B. Gou, J. Kil, H. Sun, and Y. Su. GPT-4V(ision) is a generalist web agent, if grounded. In ICML, 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 70, + 413, + 541, + 437 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 413, + 541, + 437 + ], + "spans": [ + { + "bbox": [ + 70, + 413, + 541, + 437 + ], + "type": "text", + "content": "[87] L. Zheng, Z. Huang, Z. Xue, X. Wang, B. An, and S. Yan. AgentStudio: A toolkit for building general virtual agents. arXiv preprint arXiv:2403.17918, 2024."
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 69, + 443, + 541, + 478 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 443, + 541, + 478 + ], + "spans": [ + { + "bbox": [ + 69, + 443, + 541, + 478 + ], + "type": "text", + "content": "[88] M. Zhong, A. Zhang, X. Wang, R. Hou, W. Xiong, C. Zhu, Z. Chen, L. Tan, C. Bi, M. Lewis, S. Popuri, S. Narang, M. Kambadur, D. Mahajan, S. Edunov, J. Han, and L. van der Maaten. Law of the weakest link: Cross capabilities of large language models. arXiv preprint arXiv:2409.19951, 2024." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 69, + 483, + 541, + 507 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 483, + 541, + 507 + ], + "spans": [ + { + "bbox": [ + 69, + 483, + 541, + 507 + ], + "type": "text", + "content": "[89] J. Zhou, T. Lu, S. Mishra, S. Brahma, S. Basu, Y. Luan, D. Zhou, and L. Hou. Instruction-following evaluation for large language models, 2023. URL https://arxiv.org/abs/2311.07911." + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "spans": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "text", + "content": "The Amazon Nova Family of Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 297, + 740, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 740, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 297, + 740, + 309, + 750 + ], + "type": "text", + "content": "27" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 26 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 71, + 270, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 71, + 270, + 85 + ], + "spans": [ + { + "bbox": [ + 69, + 71, + 270, + 85 + ], + "type": "text", + "content": "A Amazon Nova Canvas Capabilities" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 95, + 447, + 107 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 95, + 447, + 107 + ], + "spans": [ + { + "bbox": [ + 69, + 95, + 447, + 107 + ], + "type": "text", + "content": "Our Nova Canvas model offers the following functionalities, with examples given in Figure 5." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 96, + 115, + 539, + 267 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 96, + 115, + 538, + 137 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 115, + 538, + 137 + ], + "spans": [ + { + "bbox": [ + 96, + 115, + 538, + 137 + ], + "type": "text", + "content": "- Text-to-image generation allows customers to create images with various resolutions (from " + }, + { + "bbox": [ + 96, + 115, + 538, + 137 + ], + "type": "inline_equation", + "content": "512 \\times 512" + }, + { + "bbox": [ + 96, + 115, + 538, + 137 + ], + "type": "text", + "content": " up to " + }, + { + "bbox": [ + 96, + 115, + 538, + 137 + ], + "type": "inline_equation", + "content": "2\\mathrm{K} \\times 2\\mathrm{K}" + }, + { + "bbox": [ + 96, + 115, + 538, + 137 + ], + "type": "text", + "content": " resolution)." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 96, + 141, + 538, + 174 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 141, + 538, + 174 + ], + "spans": [ + { + "bbox": [ + 96, + 141, + 538, + 174 + ], + "type": "text", + "content": "- Editing allows developers to edit images using a text prompt, a mask image, or both. Amazon Nova Canvas supports text-to-image editing and image-to-image editing, including inpainting, outpainting and object removal." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 96, + 178, + 539, + 200 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 178, + 539, + 200 + ], + "spans": [ + { + "bbox": [ + 96, + 178, + 539, + 200 + ], + "type": "text", + "content": "- Image variation allows customers to generate images that keep the content of the user-provided images while introducing variations." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 96, + 205, + 539, + 226 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 205, + 539, + 226 + ], + "spans": [ + { + "bbox": [ + 96, + 205, + 539, + 226 + ], + "type": "text", + "content": "- Image conditioning lets customers provide a reference image along with a text prompt, resulting in outputs that follow the layout and structure of the user-supplied reference." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 96, + 230, + 539, + 252 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 230, + 539, + 252 + ], + "spans": [ + { + "bbox": [ + 96, + 230, + 539, + 252 + ], + "type": "text", + "content": "- Image guidance with color palette allows customers to precisely control the color palette of generated images by providing a list of hex codes along with the text prompt." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 96, + 256, + 498, + 267 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 256, + 498, + 267 + ], + "spans": [ + { + "bbox": [ + 96, + 256, + 498, + 267 + ], + "type": "text", + "content": "- Background removal automatically removes the background from images containing multiple objects."
+ } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "spans": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "text", + "content": "The Amazon Nova Family of Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 298, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 298, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 298, + 741, + 309, + 750 + ], + "type": "text", + "content": "28" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 27 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 189, + 125, + 286, + 222 + ], + "blocks": [ + { + "bbox": [ + 102, + 168, + 179, + 187 + ], + "lines": [ + { + "bbox": [ + 102, + 168, + 179, + 187 + ], + "spans": [ + { + "bbox": [ + 102, + 168, + 179, + 187 + ], + "type": "text", + "content": "A dinosaur sitting in a tea cup" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 189, + 125, + 286, + 222 + ], + "lines": [ + { + "bbox": [ + 189, + 125, + 286, + 222 + ], + "spans": [ + { + "bbox": [ + 189, + 125, + 286, + 222 + ], + "type": "image", + "image_path": "76e139eb48ebbe67c6ae23af9d987841e615fe510fc47192d81ab31c91976ad0.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 306, + 166, + 402, + 221 + ], + "blocks": [ + { + "bbox": [ + 306, + 166, + 402, + 221 + ], + "lines": [ + { + "bbox": [ + 306, + 166, + 402, + 221 + ], + "spans": [ + { + "bbox": [ + 306, + 166, + 402, + 221 + ], + "type": "image", + "image_path": "b6eeeaf2872be27c90e24802e500673f2edd6c10373e7d61cccf86e50079d449.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 346, + 225, + 477, + 237 + ], + "lines": [ + { + "bbox": [ + 346, + 225, + 477, + 237 + ], + "spans": [ + { + "bbox": [ + 346, + 225, + 477, + 237 + ], + "type": "text", + "content": "(b) Inpainting the image with swans" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 403, + 166, + 499, + 221 + ], + "blocks": [ + { + "bbox": [ + 403, + 166, + 499, + 221 + ], + "lines": [ + { + "bbox": [ + 403, + 166, + 499, + 221 + ], + "spans": [ + { + "bbox": [ + 403, + 166, + 499, + 221 + ], + "type": "image", + "image_path": "a0f9f9877678b8c10c66531b68e095c59e1aa8c5547165a942d51f5903c8cfd2.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 93, + 248, + 189, + 346 + ], + "blocks": [ + { + "bbox": [ + 125, + 226, + 272, + 237 + ], + "lines": [ + { + "bbox": [ + 125, + 226, + 272, + 237 + ], + "spans": [ + { + "bbox": [ + 125, + 226, + 272, + 237 + ], + "type": "text", + "content": "(a) Image generation from a text prompt" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 93, + 248, + 189, + 346 + ], + "lines": [ + { + "bbox": [ + 93, + 248, + 189, + 346 + ], + "spans": [ + { + "bbox": [ + 93, + 248, + 189, + 346 + ], + "type": "image", + "image_path": "f067e42572ed86ce39b690f5084a986840fe5c7a607cd71a992dbb2c6eca7bd7.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 190, + 258, 
+ 286, + 355 + ], + "blocks": [ + { + "bbox": [ + 97, + 350, + 184, + 357 + ], + "lines": [ + { + "bbox": [ + 97, + 350, + 184, + 357 + ], + "spans": [ + { + "bbox": [ + 97, + 350, + 184, + 357 + ], + "type": "text", + "content": "change flowers to orange color" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 190, + 258, + 286, + 355 + ], + "lines": [ + { + "bbox": [ + 190, + 258, + 286, + 355 + ], + "spans": [ + { + "bbox": [ + 190, + 258, + 286, + 355 + ], + "type": "image", + "image_path": "c5cb0ef9a0a9732d58e0989d6f1191010a2c9f768a1103091663080bd660e849.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 306, + 267, + 402, + 364 + ], + "blocks": [ + { + "bbox": [ + 306, + 267, + 402, + 364 + ], + "lines": [ + { + "bbox": [ + 306, + 267, + 402, + 364 + ], + "spans": [ + { + "bbox": [ + 306, + 267, + 402, + 364 + ], + "type": "image", + "image_path": "bd620d782bd91e54f3c9b6c828c79489b562e1ef53f3d5fefaf88840c93693bb.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 349, + 369, + 474, + 380 + ], + "lines": [ + { + "bbox": [ + 349, + 369, + 474, + 380 + ], + "spans": [ + { + "bbox": [ + 349, + 369, + 474, + 380 + ], + "type": "text", + "content": "(d) Outpainting a new background" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 403, + 267, + 499, + 364 + ], + "blocks": [ + { + "bbox": [ + 403, + 267, + 499, + 364 + ], + "lines": [ + { + "bbox": [ + 403, + 267, + 499, + 364 + ], + "spans": [ + { + "bbox": [ + 403, + 267, + 499, + 364 + ], + "type": "image", + "image_path": "9adcd93c3a348a977378541a3eb9005b49c67785526792f626c35ec61a8e75a6.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 93, + 392, + 189, + 488 + ], + "blocks": [ + { + "bbox": [ + 167, + 369, + 230, + 380 + ], + "lines": [ + { + "bbox": [ + 167, + 369, + 230, + 380 + ], + "spans": [ + { + "bbox": [ + 167, + 369, + 230, + 380 + ], + "type": "text", + "content": "(c) Image editing" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 93, + 392, + 189, + 488 + ], + "lines": [ + { + "bbox": [ + 93, + 392, + 189, + 488 + ], + "spans": [ + { + "bbox": [ + 93, + 392, + 189, + 488 + ], + "type": "image", + "image_path": "797ec36ddec755043f50d9c4c3a8db04f6feb57495ebcfb57ee80e75e8722356.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 190, + 402, + 286, + 498 + ], + "blocks": [ + { + "bbox": [ + 97, + 493, + 184, + 501 + ], + "lines": [ + { + "bbox": [ + 97, + 493, + 184, + 501 + ], + "spans": [ + { + "bbox": [ + 97, + 493, + 184, + 501 + ], + "type": "text", + "content": "a hamster eats apple slice" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 190, + 402, + 286, + 498 + ], + "lines": [ + { + "bbox": [ + 190, + 402, + 286, + 498 + ], + "spans": [ + { + "bbox": [ + 190, + 402, + 286, + 498 + ], + "type": "image", + "image_path": "baa82e589e4f25edd6757b4df2d5b65509e507cf1609604b25a0fb5bbd27d127.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 306, + 392, + 402, + 488 + ], + "blocks": [ + { + "bbox": [ + 306, + 392, + 402, 
+ 488 + ], + "lines": [ + { + "bbox": [ + 306, + 392, + 402, + 488 + ], + "spans": [ + { + "bbox": [ + 306, + 392, + 402, + 488 + ], + "type": "image", + "image_path": "249480146ec018334d61af0070a064c6e4def2de70d75b161a1be51e5c5abf24.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 310, + 492, + 397, + 500 + ], + "lines": [ + { + "bbox": [ + 310, + 492, + 397, + 500 + ], + "spans": [ + { + "bbox": [ + 310, + 492, + 397, + 500 + ], + "type": "text", + "content": "A wooden boat in summer" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 403, + 402, + 499, + 498 + ], + "blocks": [ + { + "bbox": [ + 403, + 402, + 499, + 498 + ], + "lines": [ + { + "bbox": [ + 403, + 402, + 499, + 498 + ], + "spans": [ + { + "bbox": [ + 403, + 402, + 499, + 498 + ], + "type": "image", + "image_path": "f1232a213ee459c8d311a66e6cfa0b449d50930b4ecc8696660599b3d68a90b4.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 96, + 564, + 185, + 587 + ], + "blocks": [ + { + "bbox": [ + 96, + 564, + 185, + 587 + ], + "lines": [ + { + "bbox": [ + 96, + 564, + 185, + 587 + ], + "spans": [ + { + "bbox": [ + 96, + 564, + 185, + 587 + ], + "type": "image", + "image_path": "21668492523acdf3cb014570bfb648a5eb6d1b6bd74d14386e769c83ee12b86c.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 92, + 589, + 188, + 616 + ], + "lines": [ + { + "bbox": [ + 92, + 589, + 188, + 616 + ], + "spans": [ + { + "bbox": [ + 92, + 589, + 188, + 616 + ], + "type": "text", + "content": "A jar of salad dressing in a rustic kitchen surrounded by fresh vegetables with studio lighting" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_caption" + } + ], + "index": 22 + }, + { + "type": "image", + "bbox": [ + 190, + 536, + 286, + 632 + ], + "blocks": [ + { + "bbox": [ + 167, + 512, + 230, + 522 + ], + "lines": [ + { + "bbox": [ + 167, + 512, + 230, + 522 + ], + "spans": [ + { + "bbox": [ + 167, + 512, + 230, + 522 + ], + "type": "text", + "content": "(e) Style transfer" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 190, + 536, + 286, + 632 + ], + "lines": [ + { + "bbox": [ + 190, + 536, + 286, + 632 + ], + "spans": [ + { + "bbox": [ + 190, + 536, + 286, + 632 + ], + "type": "image", + "image_path": "50ddd1488f51b07c178b9608ddaee633be7d742f40fcd2b07ebd87617516fae9.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 140, + 635, + 257, + 647 + ], + "lines": [ + { + "bbox": [ + 140, + 635, + 257, + 647 + ], + "spans": [ + { + "bbox": [ + 140, + 635, + 257, + 647 + ], + "type": "text", + "content": "(g) Controlling the color palette" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 105, + 652, + 503, + 665 + ], + "lines": [ + { + "bbox": [ + 105, + 652, + 503, + 665 + ], + "spans": [ + { + "bbox": [ + 105, + 652, + 503, + 665 + ], + "type": "text", + "content": "Figure 5: Example capabilities of Amazon Nova Canvas, our content generation model for images." 
+ } + ] + } + ], + "index": 29, + "angle": 0, + "type": "image_caption" + } + ], + "index": 24 + }, + { + "type": "image", + "bbox": [ + 306, + 535, + 402, + 632 + ], + "blocks": [ + { + "bbox": [ + 372, + 512, + 451, + 523 + ], + "lines": [ + { + "bbox": [ + 372, + 512, + 451, + 523 + ], + "spans": [ + { + "bbox": [ + 372, + 512, + 451, + 523 + ], + "type": "text", + "content": "(f) Guided generation" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 306, + 535, + 402, + 632 + ], + "lines": [ + { + "bbox": [ + 306, + 535, + 402, + 632 + ], + "spans": [ + { + "bbox": [ + 306, + 535, + 402, + 632 + ], + "type": "image", + "image_path": "a676fc75d5145c13756d1582514a2a8d24bb66faa47989ebc9fadd33faf862de.jpg" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 364, + 635, + 458, + 647 + ], + "lines": [ + { + "bbox": [ + 364, + 635, + 458, + 647 + ], + "spans": [ + { + "bbox": [ + 364, + 635, + 458, + 647 + ], + "type": "text", + "content": "(h) Background Removal" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_caption" + } + ], + "index": 26 + }, + { + "type": "image", + "bbox": [ + 408, + 563, + 490, + 618 + ], + "blocks": [ + { + "bbox": [ + 408, + 563, + 490, + 618 + ], + "lines": [ + { + "bbox": [ + 408, + 563, + 490, + 618 + ], + "spans": [ + { + "bbox": [ + 408, + 563, + 490, + 618 + ], + "type": "image", + "image_path": "1c1166d99bcb5b4b726864c65e11014aa1fe34f34def76dd67e8b8644c2f3a38.jpg" + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_body" + } + ], + "index": 28 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "spans": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "text", + "content": "The Amazon Nova Family of Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 298, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 298, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 298, + 741, + 309, + 750 + ], + "type": "text", + "content": "29" + } + ] + } + ], + "index": 30 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 28 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 71, + 201, + 86 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 71, + 201, + 86 + ], + "spans": [ + { + "bbox": [ + 69, + 71, + 201, + 86 + ], + "type": "text", + "content": "B Prompts and Scoring" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 68, + 95, + 542, + 118 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 95, + 542, + 118 + ], + "spans": [ + { + "bbox": [ + 68, + 95, + 542, + 118 + ], + "type": "text", + "content": "Prompt templates used for Amazon Nova evaluations are given below, along with those used for select other public models where noted. 
Additional materials and evaluation results from this report can be found at:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 216, + 128, + 393, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 216, + 128, + 393, + 140 + ], + "spans": [ + { + "bbox": [ + 216, + 128, + 393, + 140 + ], + "type": "text", + "content": "https://huggingface.co/amazon-agi" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 152, + 162, + 163 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 152, + 162, + 163 + ], + "spans": [ + { + "bbox": [ + 69, + 152, + 162, + 163 + ], + "type": "text", + "content": "B.1 Text evaluation" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 172, + 212, + 185 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 172, + 212, + 185 + ], + "spans": [ + { + "bbox": [ + 69, + 172, + 212, + 185 + ], + "type": "text", + "content": "B.1.1 Language Understanding" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 190, + 123, + 201 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 190, + 123, + 201 + ], + "spans": [ + { + "bbox": [ + 69, + 190, + 123, + 201 + ], + "type": "text", + "content": "For MMLU:" + } + ] + } + ], + "index": 6 + }, + { + "type": "code", + "bbox": [ + 69, + 209, + 538, + 254 + ], + "blocks": [ + { + "bbox": [ + 69, + 209, + 538, + 254 + ], + "lines": [ + { + "bbox": [ + 69, + 209, + 538, + 254 + ], + "spans": [ + { + "bbox": [ + 69, + 209, + 538, + 254 + ], + "type": "text", + "content": "What is the correct answer to this question: \nChoices: . Let's think step by step: \nBased on the above, what is the single, most likely answer choice? Answer in the format \"The correct answer is (insert answer here).\"" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "code_body" + } + ], + "index": 7, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 69, + 266, + 122, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 266, + 122, + 277 + ], + "spans": [ + { + "bbox": [ + 69, + 266, + 122, + 277 + ], + "type": "text", + "content": "For ARC-C:" + } + ] + } + ], + "index": 8 + }, + { + "type": "code", + "bbox": [ + 69, + 285, + 534, + 342 + ], + "blocks": [ + { + "bbox": [ + 69, + 285, + 534, + 342 + ], + "lines": [ + { + "bbox": [ + 69, + 285, + 534, + 342 + ], + "spans": [ + { + "bbox": [ + 69, + 285, + 534, + 342 + ], + "type": "text", + "content": "Given the following question and four candidate answers (A, B, C and D), choose the best answer. \nQuestion: \nYour response should end with \"The best answer is [the_answer_letter]\" where the [the_answer_letter] is one of A, B, C or D."
+ } + ] + } + ], + "index": 9, + "angle": 0, + "type": "code_body" + } + ], + "index": 9, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 69, + 354, + 117, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 354, + 117, + 365 + ], + "spans": [ + { + "bbox": [ + 69, + 354, + 117, + 365 + ], + "type": "text", + "content": "For DROP:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 69, + 365, + 189, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 365, + 189, + 376 + ], + "spans": [ + { + "bbox": [ + 69, + 365, + 189, + 376 + ], + "type": "text", + "content": "We use the following 6 shots:" + } + ] + } + ], + "index": 11 + }, + { + "type": "code", + "bbox": [ + 78, + 384, + 490, + 723 + ], + "blocks": [ + { + "bbox": [ + 78, + 384, + 490, + 723 + ], + "lines": [ + { + "bbox": [ + 78, + 384, + 490, + 723 + ], + "spans": [ + { + "bbox": [ + 78, + 384, + 490, + 723 + ], + "type": "text", + "content": "- answer: " + }, + { + "bbox": [ + 78, + 384, + 490, + 723 + ], + "type": "inline_equation", + "content": "> -" + }, + { + "bbox": [ + 78, + 384, + 490, + 723 + ], + "type": "text", + "content": " According to the passage, the European Coal and Steel Community was established in 1951 and became the EEC in 1958. 1958 - 1951 = 7. So the answer is 7 \npassage: " + }, + { + "bbox": [ + 78, + 384, + 490, + 723 + ], + "type": "inline_equation", + "content": "> -" + }, + { + "bbox": [ + 78, + 384, + 490, + 723 + ], + "type": "text", + "content": " Since the 1970s, U.S. governments have negotiated managed-trade agreements, such as the North American Free Trade Agreement in the 1990s, the Dominican Republic-Central America Free Trade Agreement in 2006, and a number of bilateral agreements. In Europe, six countries formed the European Coal and Steel Community in 1951 which became the European Economic Community in 1958. Two core objectives of the EEC were the development of a common market, subsequently renamed the single market, and establishing a customs union between its member states. question: How many years did the European Coal and Steel Community exist? \n- answer: " + }, + { + "bbox": [ + 78, + 384, + 490, + 723 + ], + "type": "inline_equation", + "content": "> -" + }, + { + "bbox": [ + 78, + 384, + 490, + 723 + ], + "type": "text", + "content": " According to the passage, " + }, + { + "bbox": [ + 78, + 384, + 490, + 723 + ], + "type": "inline_equation", + "content": "23.5\\%" + }, + { + "bbox": [ + 78, + 384, + 490, + 723 + ], + "type": "text", + "content": " ages 18 to 24. 
" + }, + { + "bbox": [ + 78, + 384, + 490, + 723 + ], + "type": "inline_equation", + "content": "23.5\\%" + }, + { + "bbox": [ + 78, + 384, + 490, + 723 + ], + "type": "text", + "content": " \npassage: " + }, + { + "bbox": [ + 78, + 384, + 490, + 723 + ], + "type": "inline_equation", + "content": "> -" + }, + { + "bbox": [ + 78, + 384, + 490, + 723 + ], + "type": "text", + "content": " In the county, the population was spread out with " + }, + { + "bbox": [ + 78, + 384, + 490, + 723 + ], + "type": "inline_equation", + "content": "23.50\\%" + }, + { + "bbox": [ + 78, + 384, + 490, + 723 + ], + "type": "text", + "content": " 18, " + }, + { + "bbox": [ + 78, + 384, + 490, + 723 + ], + "type": "inline_equation", + "content": "8.70\\%" + }, + { + "bbox": [ + 78, + 384, + 490, + 723 + ], + "type": "inline_equation", + "content": "13.30\\%" + }, + { + "bbox": [ + 78, + 384, + 490, + 723 + ], + "type": "text", + "content": " \nquestion: " + }, + { + "bbox": [ + 78, + 384, + 490, + 723 + ], + "type": "inline_equation", + "content": "> -" + }, + { + "bbox": [ + 78, + 384, + 490, + 723 + ], + "type": "text", + "content": " How many more percent are under the age of 18 compared to the 18 to 24 group? \n- answer: " + }, + { + "bbox": [ + 78, + 384, + 490, + 723 + ], + "type": "inline_equation", + "content": "> -" + }, + { + "bbox": [ + 78, + 384, + 490, + 723 + ], + "type": "text", + "content": " According to the passage, Stafford threw 5 TD passes, 3 of which were to Johnson. " + }, + { + "bbox": [ + 78, + 384, + 490, + 723 + ], + "type": "inline_equation", + "content": "5 - 3 = 2" + }, + { + "bbox": [ + 78, + 384, + 490, + 723 + ], + "type": "text", + "content": " . So the answer is 2 \npassage: " + }, + { + "bbox": [ + 78, + 384, + 490, + 723 + ], + "type": "inline_equation", + "content": "> -" + }, + { + "bbox": [ + 78, + 384, + 490, + 723 + ], + "type": "text", + "content": " Playing in their second straight Thanksgiving game, the Eagles struggled especially on defense, where they were unable to stop the much-hyped Lions offense. The worst of it all was how unproven rookie Eric Rowe was tasked" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "code_body" + } + ], + "index": 12, + "sub_type": "code", + "guess_lang": "txt" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "spans": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "text", + "content": "The Amazon Nova Family of Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 297, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 297, + 741, + 309, + 750 + ], + "type": "text", + "content": "30" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 29 + }, + { + "para_blocks": [ + { + "type": "code", + "bbox": [ + 67, + 69, + 544, + 602 + ], + "blocks": [ + { + "bbox": [ + 67, + 69, + 544, + 602 + ], + "lines": [ + { + "bbox": [ + 67, + 69, + 544, + 602 + ], + "spans": [ + { + "bbox": [ + 67, + 69, + 544, + 602 + ], + "type": "text", + "content": "with covering wide receiver Calvin Johnson, leading to Johnson catching 3 \ntouchdowns. Stafford's five passing touchdowns, including three of them to \nJohnson was too much for the Eagles to overcome and for the second \nconsecutive time this season, the Eagles gave up 45 points in a game. 
With \nthe loss, the Eagles drop to 4-7 on the season and 6-1 when playing on \nThanksgiving. \nquestion: How many TD passes did Stafford throw other than to Johnson? \n- answer: " + }, + { + "bbox": [ + 67, + 69, + 544, + 602 + ], + "type": "inline_equation", + "content": ">" + }, + { + "bbox": [ + 67, + 69, + 544, + 602 + ], + "type": "text", + "content": " All the touchdown runs are: a 27-yard touchdown run, a 9-yard touchdown run, a 11-yard touchdown run. The smallest number among 27, 9, 11 is 9. So the shortest touchdown run was 9 yards. All the touchdown passes are: a 12-yard touchdown pass. So the longest touchdown pass was 12 yards. So the shortest touchdown run and the longest touchdown pass combine for " + }, + { + "bbox": [ + 67, + 69, + 544, + 602 + ], + "type": "inline_equation", + "content": "9 + 12 =" + }, + { + "bbox": [ + 67, + 69, + 544, + 602 + ], + "type": "text", + "content": " 21 yards. So the answer is 21 passage: " + }, + { + "bbox": [ + 67, + 69, + 544, + 602 + ], + "type": "inline_equation", + "content": ">" + }, + { + "bbox": [ + 67, + 69, + 544, + 602 + ], + "type": "text", + "content": " The Seahawks played the San Francisco 49ers. In the first quarter, the Hawks RB Julius Jones got a 27-yard TD run, along with DT Craig Terrill returning a fumble 9 yards for a touchdown. In the third quarter, the 49ers almost rallied as RB H. J. Torres made a 12-yard TD pass to Lucas Nelly, along with Mare kicking a 32-yard field goal. In the final quarter, Julius Jones got another 11-yard TD. question: " + }, + { + "bbox": [ + 67, + 69, + 544, + 602 + ], + "type": "inline_equation", + "content": ">" + }, + { + "bbox": [ + 67, + 69, + 544, + 602 + ], + "type": "text", + "content": " How many yards do the shortest touchdown run and the longest touchdown pass combine for? \n- answer: " + }, + { + "bbox": [ + 67, + 69, + 544, + 602 + ], + "type": "inline_equation", + "content": ">" + }, + { + "bbox": [ + 67, + 69, + 544, + 602 + ], + "type": "text", + "content": " The Ravens kicker Billy Cundiff got a 45-yard field goal in the second quarter, concluding the first half with a 10-7 lead. So the Ravens had 10 points at halftime. So the answer is 10 passage: " + }, + { + "bbox": [ + 67, + 69, + 544, + 602 + ], + "type": "inline_equation", + "content": ">" + }, + { + "bbox": [ + 67, + 69, + 544, + 602 + ], + "type": "text", + "content": " The Steelers went home for a duel with the Baltimore Ravens. Pittsburgh would deliver the opening punch in the first quarter with a 1-yard touchdown from running back Rashard Mendenhall. The Ravens would make it even as running back Willis McGahee got a 9-yard TD. The Ravens kicker Billy Cundiff got a 45-yard field goal in the second quarter, concluding the first half with a 10-7 lead. The Steelers brought the game into overtime with a 38-yard field goal by Andrew Foster. The Ravens Billy Cundiff pulled off a winning 33-yard field goal in overtime. question: How many points did the Ravens have at halftime? \n- answer: " + }, + { + "bbox": [ + 67, + 69, + 544, + 602 + ], + "type": "inline_equation", + "content": ">" + }, + { + "bbox": [ + 67, + 69, + 544, + 602 + ], + "type": "text", + "content": " The first and third quarters were the scoreless quarters. So there are 2 scoreless quarters. 
So the answer is 2 passage: " + }, + { + "bbox": [ + 67, + 69, + 544, + 602 + ], + "type": "inline_equation", + "content": ">" + }, + { + "bbox": [ + 67, + 69, + 544, + 602 + ], + "type": "text", + "content": " The Vikings flew to Bank of America Stadium to face the Carolina Panthers. After a scoreless first quarter, Carolina got on the board with quarterback Matt Moore finding fullback Brad Hoover on a 1-yard TD pass. After yet another scoreless quarter, Carolina sealed the game as Matt Moore completed a 42-yard touchdown pass to wide receiver Steve Smith. question: How many scoreless quarters were there?" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "code_body" + } + ], + "index": 1, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 69, + 611, + 276, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 611, + 276, + 624 + ], + "spans": [ + { + "bbox": [ + 69, + 611, + 276, + 624 + ], + "type": "text", + "content": "For each shot we provide the following instruction:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 629, + 533, + 653 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 629, + 533, + 653 + ], + "spans": [ + { + "bbox": [ + 69, + 629, + 533, + 653 + ], + "type": "text", + "content": "Conclude your answer with: \"So the answer is {final answer}\". Make sure the final answer is in plain text format" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 667, + 247, + 679 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 667, + 247, + 679 + ], + "spans": [ + { + "bbox": [ + 69, + 667, + 247, + 679 + ], + "type": "text", + "content": "And we create each user prompt as follows:" + } + ] + } + ], + "index": 4 + }, + { + "type": "code", + "bbox": [ + 69, + 685, + 141, + 718 + ], + "blocks": [ + { + "bbox": [ + 69, + 685, + 141, + 718 + ], + "lines": [ + { + "bbox": [ + 69, + 685, + 141, + 718 + ], + "spans": [ + { + "bbox": [ + 69, + 685, + 141, + 718 + ], + "type": "text", + "content": " " + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "code_body" + } + ], + "index": 5, + "sub_type": "code", + "guess_lang": "xml" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "spans": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "text", + "content": "The Amazon Nova Family of Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 298, + 741, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 298, + 741, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 298, + 741, + 308, + 750 + ], + "type": "text", + "content": "31" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 30 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 72, + 118, + 83 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 72, + 118, + 83 + ], + "spans": [ + { + "bbox": [ + 69, + 72, + 118, + 83 + ], + "type": "text", + "content": "For IFEval:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 83, + 340, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 83, + 340, + 95 + ], + "spans": [ + { + "bbox": [ + 69, + 83, + 340, + 95 + ], + "type": "text", + "content": "No particular prompt was added (query was inputted to the model)." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 99, + 111, + 110 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 99, + 111, + 110 + ], + "spans": [ + { + "bbox": [ + 69, + 99, + 111, + 110 + ], + "type": "text", + "content": "For BBH:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 110, + 292, + 122 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 110, + 292, + 122 + ], + "spans": [ + { + "bbox": [ + 69, + 110, + 292, + 122 + ], + "type": "text", + "content": "We use a preamble that describes the task, for example:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 128, + 337, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 128, + 337, + 140 + ], + "spans": [ + { + "bbox": [ + 69, + 128, + 337, + 140 + ], + "type": "text", + "content": "Evaluate the result of a random Boolean expression." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 152, + 311, + 165 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 152, + 311, + 165 + ], + "spans": [ + { + "bbox": [ + 69, + 152, + 311, + 165 + ], + "type": "text", + "content": "We then provide few shot examples in the following format:" + } + ] + } + ], + "index": 6 + }, + { + "type": "code", + "bbox": [ + 69, + 169, + 375, + 225 + ], + "blocks": [ + { + "bbox": [ + 69, + 169, + 375, + 225 + ], + "lines": [ + { + "bbox": [ + 69, + 169, + 375, + 225 + ], + "spans": [ + { + "bbox": [ + 69, + 169, + 375, + 225 + ], + "type": "text", + "content": "< preamble> \nQuestion: \n \nLet's think step by step. \n. So the answer is " + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "code_body" + } + ], + "index": 7, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 69, + 237, + 202, + 250 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 237, + 202, + 250 + ], + "spans": [ + { + "bbox": [ + 69, + 237, + 202, + 250 + ], + "type": "text", + "content": "And we follow this by the query:" + } + ] + } + ], + "index": 8 + }, + { + "type": "code", + "bbox": [ + 69, + 255, + 201, + 299 + ], + "blocks": [ + { + "bbox": [ + 69, + 255, + 201, + 299 + ], + "lines": [ + { + "bbox": [ + 69, + 255, + 201, + 299 + ], + "spans": [ + { + "bbox": [ + 69, + 255, + 201, + 299 + ], + "type": "text", + "content": "< preamble> \nQuestion: \n \nLet's think step by step." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "code_body" + } + ], + "index": 9, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 69, + 312, + 356, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 312, + 356, + 323 + ], + "spans": [ + { + "bbox": [ + 69, + 312, + 356, + 323 + ], + "type": "text", + "content": "For each subject, We provide the subject-specific instructions as below:" + } + ] + } + ], + "index": 10 + }, + { + "type": "code", + "bbox": [ + 69, + 329, + 526, + 723 + ], + "blocks": [ + { + "bbox": [ + 69, + 329, + 526, + 723 + ], + "lines": [ + { + "bbox": [ + 69, + 329, + 526, + 723 + ], + "spans": [ + { + "bbox": [ + 69, + 329, + 526, + 723 + ], + "type": "text", + "content": "- subject: booleanExpressions\n instruction: Conclude your answer with: \"So the answer is True or False.\"\n- subject: causal_judgement\n instruction: Conclude your answer with: \"So the answer is Yes or No.\"\n- subject: date_understanding\n instruction: Conclude your answer with: \"So the answer is (answer_letter).\" . 
Where answer_letter is A, or B, or ...\n- subject: disambiguation_qa\n instruction: Conclude your answer with: \"So the answer is (answer_letter).\" . Where answer_letter is A, or B, or ...\n- subject: dycklanguages\n instruction: Correctly close a Dyck-n word. Conclude your answer with: \"So the answer is {final answer}.\". Make sure the final answer is in plain text format\n- subject: formal_fallacies\n instruction: Conclude your answer with: \"So the answer is valid or invalid.\"\n- subject: geometric_shapes\n instruction: Conclude your answer with: \"So the answer is (answer_letter).\" . Where answer_letter is A, or B, or ...\n- subject: hyperbaton\n instruction: Conclude your answer with: \"\\So the answer is (answer_letter).\" . Where answer_letter is A, or B, or ...\n- subject: logical_deductionfive Objects\n instruction: Conclude your answer with: \"So the answer is (answer_letter).\" . Where answer_letter is A, or B, or ...\n- subject: logical_deduction-seven Objects\n instruction: Conclude your answer with: \"So the answer is (answer_letter).\" . Where answer_letter is A, or B, or ...\n- subject: logical_deduction_three Objects\n instruction: Conclude your answer with: \"So the answer is (answer_letter).\" . Where answer_letter is A, or B, or ...\n- subject: movie Recommendation\n instruction: Conclude your answer with: \"So the answer is (answer_letter).\" . Where answer_letter is A, or B, or ...\n- subject: multistep_arithmetic_two\n instruction: Conclude your answer with: \"So the answer is {final answer}.\". Make sure the final answer is in plain text format" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "code_body" + } + ], + "index": 11, + "sub_type": "code", + "guess_lang": "yaml" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "spans": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "text", + "content": "The Amazon Nova Family of Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 298, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 298, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 298, + 741, + 309, + 750 + ], + "type": "text", + "content": "32" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 31 + }, + { + "para_blocks": [ + { + "type": "code", + "bbox": [ + 69, + 73, + 541, + 491 + ], + "blocks": [ + { + "bbox": [ + 69, + 73, + 541, + 491 + ], + "lines": [ + { + "bbox": [ + 69, + 73, + 541, + 491 + ], + "spans": [ + { + "bbox": [ + 69, + 73, + 541, + 491 + ], + "type": "text", + "content": "- subject: navigate\n instruction: Conclude your answer with: \"So the answer is Yes or No\".\n- subject: object_counting\n instruction: Conclude your answer with: \"So the answer is .\". Where is an integer\n- subject: penguins_in_a_table\n instruction: Conclude your answer with: \"So the answer is (answer_letter)\". Where answer_letter is A, or B, or ...\n- subject: reasoning_about_colored Objects\n instruction: Conclude your answer with: \"So the answer is (answer_letter)\". Where answer_letter is A, or B, or ...\n- subject: ruin_names\n instruction: Conclude your answer with: \"So the answer is (answer_letter)\". Where answer_letter is A, or B, or ...\n- subject: salient Translation_error_detector\n instruction: Conclude your answer with: \"So the answer is (answer_letter)\". 
Where answer_letter is A, or B, or ...\n- subject: snarks\n instruction: Conclude your answer with: \"So the answer is (answer_letter)\". Where answer_letter is A, or B, or ...\n- subject: sports-understanding\n instruction: Conclude your answer with: \"So the answer is yes or no\".\n- subject: temporal_sequences\n instruction: Conclude your answer with: \"So the answer is (answer_letter)\". Where answer_letter is A, or B, or ...\n- subject: tracking_shuffledobjectsFive Objects\n instruction: Conclude your answer with: \"So the answer is (answer_letter)\". Where answer_letter is A, or B, or ...\n- subject: tracking_shuffledobjects-seven Objects\n instruction: Conclude your answer with: \"So the answer is (answer_letter)\". Where answer_letter is A, or B, or ...\n- subject: tracking_shuffledobjects_three Objects\n instruction: \"Conclude your answer with: \"So the answer is (answer_letter)\". Where answer_letter is A, or B, or ...\n- subject: web_of Lies\n instruction: Conclude your answer with: \"So the answer is Yes or No\".\n- subject: wordsorting\n instruction: Conclude your answer with: \"So the answer is word_1 word_2 ... word_n\".\"" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "code_body" + } + ], + "index": 1, + "sub_type": "code", + "guess_lang": "yaml" + }, + { + "bbox": [ + 69, + 506, + 118, + 517 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 506, + 118, + 517 + ], + "spans": [ + { + "bbox": [ + 69, + 506, + 118, + 517 + ], + "type": "text", + "content": "For GPQA:" + } + ] + } + ], + "index": 2 + }, + { + "type": "code", + "bbox": [ + 67, + 529, + 539, + 575 + ], + "blocks": [ + { + "bbox": [ + 67, + 529, + 539, + 575 + ], + "lines": [ + { + "bbox": [ + 67, + 529, + 539, + 575 + ], + "spans": [ + { + "bbox": [ + 67, + 529, + 539, + 575 + ], + "type": "text", + "content": "What is the correct answer to this question: \nChoices: . Let's think step by step: \nBased on the above, what is the single, most likely answer choice? Answer in the format \"The correct answer is (insert answer here).\"" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "code_body" + } + ], + "index": 3, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 69, + 605, + 211, + 617 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 605, + 211, + 617 + ], + "spans": [ + { + "bbox": [ + 69, + 605, + 211, + 617 + ], + "type": "text", + "content": "B.1.2 Mathematical Reasoning" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 625, + 158, + 637 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 625, + 158, + 637 + ], + "spans": [ + { + "bbox": [ + 69, + 625, + 158, + 637 + ], + "type": "text", + "content": "For MATH, GSM8K:" + } + ] + } + ], + "index": 5 + }, + { + "type": "code", + "bbox": [ + 69, + 649, + 312, + 705 + ], + "blocks": [ + { + "bbox": [ + 69, + 649, + 312, + 705 + ], + "lines": [ + { + "bbox": [ + 69, + 649, + 312, + 705 + ], + "spans": [ + { + "bbox": [ + 69, + 649, + 312, + 705 + ], + "type": "text", + "content": "Solve the following math problem step by step. 
Remember to put your answer inside \\boxed{}" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "code_body" + } + ], + "index": 6, + "sub_type": "code", + "guess_lang": "txt" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "spans": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "text", + "content": "The Amazon Nova Family of Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 298, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 298, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 298, + 741, + 309, + 750 + ], + "type": "text", + "content": "33" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 32 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 72, + 153, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 72, + 153, + 83 + ], + "spans": [ + { + "bbox": [ + 69, + 72, + 153, + 83 + ], + "type": "text", + "content": "B.1.3 Translation" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 91, + 116, + 101 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 91, + 116, + 101 + ], + "spans": [ + { + "bbox": [ + 69, + 91, + 116, + 101 + ], + "type": "text", + "content": "For Flores:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 102, + 144, + 113 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 102, + 144, + 113 + ], + "spans": [ + { + "bbox": [ + 69, + 102, + 144, + 113 + ], + "type": "text", + "content": "Nova and Llama:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 120, + 538, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 120, + 538, + 144 + ], + "spans": [ + { + "bbox": [ + 69, + 120, + 538, + 144 + ], + "type": "text", + "content": "Translate the following text into {tgt-lang}. Please output only the translated text with no prefix or introduction: {src}" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 157, + 143, + 168 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 157, + 143, + 168 + ], + "spans": [ + { + "bbox": [ + 69, + 157, + 143, + 168 + ], + "type": "text", + "content": "Gemini and GPT:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 175, + 527, + 199 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 175, + 527, + 199 + ], + "spans": [ + { + "bbox": [ + 69, + 175, + 527, + 199 + ], + "type": "text", + "content": "Your job is to translate a sentence from {src-lang} into {tgt-lang}. 
Please output ONLY the translation and nothing else: {src}" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 220, + 162, + 232 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 220, + 162, + 232 + ], + "spans": [ + { + "bbox": [ + 69, + 220, + 162, + 232 + ], + "type": "text", + "content": "B.1.4 Long Context" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 68, + 239, + 541, + 262 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 239, + 541, + 262 + ], + "spans": [ + { + "bbox": [ + 68, + 239, + 541, + 262 + ], + "type": "text", + "content": "For SQuALITY (ZeroScrolls Benchmark), we use the standard prompt template for Amazon Nova and Gemini models as in [69]:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 268, + 453, + 282 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 268, + 453, + 282 + ], + "spans": [ + { + "bbox": [ + 69, + 268, + 453, + 282 + ], + "type": "text", + "content": "You are given a story and a question. Answer the question in a paragraph." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 290, + 104, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 290, + 104, + 300 + ], + "spans": [ + { + "bbox": [ + 69, + 290, + 104, + 300 + ], + "type": "text", + "content": "Story:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 69, + 302, + 109, + 313 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 302, + 109, + 313 + ], + "spans": [ + { + "bbox": [ + 69, + 302, + 109, + 313 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 69, + 323, + 118, + 334 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 323, + 118, + 334 + ], + "spans": [ + { + "bbox": [ + 69, + 323, + 118, + 334 + ], + "type": "text", + "content": "Question:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 69, + 335, + 124, + 346 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 335, + 124, + 346 + ], + "spans": [ + { + "bbox": [ + 69, + 335, + 124, + 346 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 69, + 356, + 108, + 366 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 356, + 108, + 366 + ], + "spans": [ + { + "bbox": [ + 69, + 356, + 108, + 366 + ], + "type": "text", + "content": "Answer:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 69, + 390, + 193, + 401 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 390, + 193, + 401 + ], + "spans": [ + { + "bbox": [ + 69, + 390, + 193, + 401 + ], + "type": "text", + "content": "B.2 Multimodal evaluation" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 69, + 410, + 140, + 421 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 410, + 140, + 421 + ], + "spans": [ + { + "bbox": [ + 69, + 410, + 140, + 421 + ], + "type": "text", + "content": "B.2.1 MMMU" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 69, + 429, + 194, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 429, + 194, + 441 + ], + "spans": [ + { + "bbox": [ + 69, + 429, + 194, + 441 + ], + "type": "text", + "content": "For multiple-choice questions:" + } + ] + } + ], + "index": 17 + }, + { + "type": "code", + "bbox": [ + 69, + 447, + 521, + 635 + ], + "blocks": [ + { + "bbox": [ + 69, + 447, + 521, + 635 + ], + "lines": [ + { + "bbox": [ + 69, + 447, + 521, + 635 + ], + "spans": [ + { 
+ "bbox": [ + 69, + 447, + 521, + 635 + ], + "type": "text", + "content": "With the image, the following question, and the four possible answers (A, B, C and D), select the correct answer. (A) (B) ... (X) - For clear-cut questions: Give the answer directly with minimal elaboration. - For complex questions: Adopt this step-by-step method: ## Step 1: [Concise description] [Brief explanation] ## Step 2: [Concise description] [Brief explanation] In every scenario, conclude with: The best answer is [the_answer_letter]. where [ the_answer_letter] is one of A, B, C or D. Let's proceed with a systematic approach" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "code_body" + } + ], + "index": 18, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 69, + 648, + 178, + 661 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 648, + 178, + 661 + ], + "spans": [ + { + "bbox": [ + 69, + 648, + 178, + 661 + ], + "type": "text", + "content": "For open-ended questions:" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 69, + 667, + 426, + 679 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 667, + 426, + 679 + ], + "spans": [ + { + "bbox": [ + 69, + 667, + 426, + 679 + ], + "type": "text", + "content": "With the image and the following question, provide a correct answer." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 69, + 679, + 124, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 679, + 124, + 689 + ], + "spans": [ + { + "bbox": [ + 69, + 679, + 124, + 689 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 69, + 700, + 473, + 723 + ], + "type": "list", + "angle": 0, + "index": 24, + "blocks": [ + { + "bbox": [ + 69, + 700, + 473, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 700, + 473, + 712 + ], + "spans": [ + { + "bbox": [ + 69, + 700, + 473, + 712 + ], + "type": "text", + "content": "- For clear-cut questions: Give the answer directly with minimal elaboration." 
+ } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 69, + 712, + 364, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 712, + 364, + 723 + ], + "spans": [ + { + "bbox": [ + 69, + 712, + 364, + 723 + ], + "type": "text", + "content": "- For complex questions: Adopt this step-by-step method:" + } + ] + } + ], + "index": 23 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "spans": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "text", + "content": "The Amazon Nova Family of Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 297, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 297, + 741, + 309, + 750 + ], + "type": "text", + "content": "34" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 33 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 72, + 239, + 83 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 72, + 239, + 83 + ], + "spans": [ + { + "bbox": [ + 69, + 72, + 239, + 83 + ], + "type": "text", + "content": "Step 1: [Concise description]" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 72, + 84, + 169, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 84, + 169, + 95 + ], + "spans": [ + { + "bbox": [ + 72, + 84, + 169, + 95 + ], + "type": "text", + "content": "[Brief explanation]" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 71, + 95, + 237, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 95, + 237, + 106 + ], + "spans": [ + { + "bbox": [ + 71, + 95, + 237, + 106 + ], + "type": "text", + "content": "Step 2: [Concise description]" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 72, + 106, + 169, + 117 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 106, + 169, + 117 + ], + "spans": [ + { + "bbox": [ + 72, + 106, + 169, + 117 + ], + "type": "text", + "content": "[Brief explanation]" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 126, + 521, + 161 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 126, + 521, + 161 + ], + "spans": [ + { + "bbox": [ + 69, + 126, + 521, + 161 + ], + "type": "text", + "content": "In every scenario, conclude with: The best answer is [the_answer Phrase]. where [ the_answer Phrase] is a concise and direct answer to the question Let's proceed with a systematic approach." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 180, + 251, + 192 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 180, + 251, + 192 + ], + "spans": [ + { + "bbox": [ + 69, + 180, + 251, + 192 + ], + "type": "text", + "content": "B.2.2 ChartQA, DocVQA, and TextVQA" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 201, + 124, + 211 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 201, + 124, + 211 + ], + "spans": [ + { + "bbox": [ + 69, + 201, + 124, + 211 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 212, + 331, + 223 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 212, + 331, + 223 + ], + "spans": [ + { + "bbox": [ + 69, + 212, + 331, + 223 + ], + "type": "text", + "content": "Answer the question using a single word or phrase." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 243, + 136, + 253 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 243, + 136, + 253 + ], + "spans": [ + { + "bbox": [ + 69, + 243, + 136, + 253 + ], + "type": "text", + "content": "B.2.3 VATEX" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 68, + 262, + 530, + 297 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 262, + 530, + 297 + ], + "spans": [ + { + "bbox": [ + 68, + 262, + 530, + 297 + ], + "type": "text", + "content": "Render a clear and concise one-sentence summary of the video. The summary should be at least 10 words but no more than 20 words. Analyze the video first before summarizing it. Do not hallucinate objects." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 69, + 316, + 153, + 327 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 316, + 153, + 327 + ], + "spans": [ + { + "bbox": [ + 69, + 316, + 153, + 327 + ], + "type": "text", + "content": "B.2.4 EgoSchema" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 69, + 335, + 538, + 369 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 335, + 538, + 369 + ], + "spans": [ + { + "bbox": [ + 69, + 335, + 538, + 369 + ], + "type": "text", + "content": "You will be given a question about a video and three possible answer options. You will be provided frames from the video, sampled evenly across the video " + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 71, + 370, + 145, + 400 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 71, + 370, + 144, + 379 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 370, + 144, + 379 + ], + "spans": [ + { + "bbox": [ + 71, + 370, + 144, + 379 + ], + "type": "text", + "content": "(A) " + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 72, + 380, + 144, + 389 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 380, + 144, + 389 + ], + "spans": [ + { + "bbox": [ + 72, + 380, + 144, + 389 + ], + "type": "text", + "content": "(B) " + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 72, + 392, + 145, + 400 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 392, + 145, + 400 + ], + "spans": [ + { + "bbox": [ + 72, + 392, + 145, + 400 + ], + "type": "text", + "content": "(C)" + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 69, + 402, + 404, + 413 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 402, + 404, + 413 + ], + "spans": [ + { + "bbox": [ + 69, + 402, + 404, + 413 + ], + "type": "text", + "content": "Answer with the option's letter from the given choices directly." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 69, + 413, + 394, + 424 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 413, + 394, + 424 + ], + "spans": [ + { + "bbox": [ + 69, + 413, + 394, + 424 + ], + "type": "text", + "content": "Answer with the option letter from the given choices directly." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 69, + 443, + 176, + 454 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 443, + 176, + 454 + ], + "spans": [ + { + "bbox": [ + 69, + 443, + 176, + 454 + ], + "type": "text", + "content": "B.2.5 VisualWebBench" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 69, + 463, + 186, + 474 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 463, + 186, + 474 + ], + "spans": [ + { + "bbox": [ + 69, + 463, + 186, + 474 + ], + "type": "text", + "content": "For the web captioning task:" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 69, + 480, + 531, + 512 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 480, + 531, + 512 + ], + "spans": [ + { + "bbox": [ + 69, + 480, + 531, + 512 + ], + "type": "text", + "content": "\"You are given a screenshot of a webpage. Please generate the meta web description information of this webpage, i.e., content attribute in HTML element." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 69, + 524, + 509, + 546 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 524, + 509, + 546 + ], + "spans": [ + { + "bbox": [ + 69, + 524, + 509, + 546 + ], + "type": "text", + "content": "You should use this format, and do not output any explanation or any other contents: " + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 69, + 559, + 178, + 571 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 559, + 178, + 571 + ], + "spans": [ + { + "bbox": [ + 69, + 559, + 178, + 571 + ], + "type": "text", + "content": "For the heading OCR task:" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 69, + 578, + 495, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 578, + 495, + 601 + ], + "spans": [ + { + "bbox": [ + 69, + 578, + 495, + 601 + ], + "type": "text", + "content": "You are given a screenshot of a webpage. Please generate the main text within the screenshot, which can be regarded as the heading of the webpage." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 69, + 610, + 537, + 633 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 610, + 537, + 633 + ], + "spans": [ + { + "bbox": [ + 69, + 610, + 537, + 633 + ], + "type": "text", + "content": "You should directly tell me the first sentence of the main content, and do not output any explanation or any other contents." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 69, + 646, + 157, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 646, + 157, + 658 + ], + "spans": [ + { + "bbox": [ + 69, + 646, + 157, + 658 + ], + "type": "text", + "content": "For the web QA task:" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 69, + 665, + 124, + 674 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 665, + 124, + 674 + ], + "spans": [ + { + "bbox": [ + 69, + 665, + 124, + 674 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 69, + 675, + 526, + 697 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 675, + 526, + 697 + ], + "spans": [ + { + "bbox": [ + 69, + 675, + 526, + 697 + ], + "type": "text", + "content": "You should directly tell me your answer in the fewest words possible, and do not output any explanation or any other contents." 
+ } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 69, + 711, + 178, + 721 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 711, + 178, + 721 + ], + "spans": [ + { + "bbox": [ + 69, + 711, + 178, + 721 + ], + "type": "text", + "content": "For the element OCR task:" + } + ] + } + ], + "index": 29 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "spans": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "text", + "content": "The Amazon Nova Family of Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 298, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 298, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 298, + 741, + 309, + 750 + ], + "type": "text", + "content": "35" + } + ] + } + ], + "index": 30 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 34 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 79, + 526, + 103 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 79, + 526, + 103 + ], + "spans": [ + { + "bbox": [ + 69, + 79, + 526, + 103 + ], + "type": "text", + "content": "You are given a screenshot of a webpage with a red rectangle bounding box. The [x1, y1, x2, y2] coordinates of the bounding box is ." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 113, + 512, + 136 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 113, + 512, + 136 + ], + "spans": [ + { + "bbox": [ + 69, + 113, + 512, + 136 + ], + "type": "text", + "content": "Please perform OCR in the bounding box and recognize the text content within the red bounding box." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 150, + 192, + 162 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 150, + 192, + 162 + ], + "spans": [ + { + "bbox": [ + 69, + 150, + 192, + 162 + ], + "type": "text", + "content": "For the action prediction task:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 169, + 526, + 191 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 169, + 526, + 191 + ], + "spans": [ + { + "bbox": [ + 69, + 169, + 526, + 191 + ], + "type": "text", + "content": "You are given a screenshot of a webpage with a red rectangle bounding box. The [x1, y1, x2, y2] coordinates of the bounding box is ." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 192, + 522, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 192, + 522, + 224 + ], + "spans": [ + { + "bbox": [ + 69, + 192, + 522, + 224 + ], + "type": "text", + "content": "Please select the best webpage description that matches the new webpage after clicking the selected element in the bounding box: " + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 235, + 526, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 235, + 526, + 258 + ], + "spans": [ + { + "bbox": [ + 69, + 235, + 526, + 258 + ], + "type": "text", + "content": "You should directly tell me your choice in a single uppercase letter, and do not output any explanation or any other contents." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 273, + 200, + 285 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 273, + 200, + 285 + ], + "spans": [ + { + "bbox": [ + 69, + 273, + 200, + 285 + ], + "type": "text", + "content": "For the element grounding task:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 292, + 538, + 314 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 292, + 538, + 314 + ], + "spans": [ + { + "bbox": [ + 69, + 292, + 538, + 314 + ], + "type": "text", + "content": "In this website screenshot, I have labeled IDs for some HTML elements as candicates. Tell me which one best matches the description: " + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 324, + 526, + 347 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 324, + 526, + 347 + ], + "spans": [ + { + "bbox": [ + 69, + 324, + 526, + 347 + ], + "type": "text", + "content": "You should directly tell me your choice in a single uppercase letter, and do not output any explanation or any other contents." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 361, + 192, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 361, + 192, + 373 + ], + "spans": [ + { + "bbox": [ + 69, + 361, + 192, + 373 + ], + "type": "text", + "content": "For the action grounding task:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 69, + 381, + 537, + 404 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 381, + 537, + 404 + ], + "spans": [ + { + "bbox": [ + 69, + 381, + 537, + 404 + ], + "type": "text", + "content": "In this website screenshot, I have labeled IDs for some HTML elements as candidates. Tell me which one I should click to complete the following task: " + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 69, + 414, + 526, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 414, + 526, + 437 + ], + "spans": [ + { + "bbox": [ + 69, + 414, + 526, + 437 + ], + "type": "text", + "content": "You should directly tell me your choice in a single uppercase letter, and do not output any explanation or any other contents." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 69, + 459, + 174, + 470 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 459, + 174, + 470 + ], + "spans": [ + { + "bbox": [ + 69, + 459, + 174, + 470 + ], + "type": "text", + "content": "B.2.6 MM-Mind2Web" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 69, + 481, + 538, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 481, + 538, + 559 + ], + "spans": [ + { + "bbox": [ + 69, + 481, + 538, + 559 + ], + "type": "text", + "content": "Imagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click on an element with the mouse, select an option, type text or press Enter with the keyboard. (For your understanding, they are like the click(), select_option() type() functions in playwright respectively). One next step means one operation within the three." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 69, + 569, + 365, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 569, + 365, + 581 + ], + "spans": [ + { + "bbox": [ + 69, + 569, + 365, + 581 + ], + "type": "text", + "content": "You are asked to complete the following task: " + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 69, + 591, + 160, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 591, + 160, + 601 + ], + "spans": [ + { + "bbox": [ + 69, + 591, + 160, + 601 + ], + "type": "text", + "content": "Previous Actions:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 69, + 602, + 166, + 613 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 602, + 166, + 613 + ], + "spans": [ + { + "bbox": [ + 69, + 602, + 166, + 613 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 69, + 613, + 316, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 613, + 316, + 624 + ], + "spans": [ + { + "bbox": [ + 69, + 613, + 316, + 624 + ], + "type": "text", + "content": "The screenshot below shows the webpage you see." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 69, + 645, + 538, + 668 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 645, + 538, + 668 + ], + "spans": [ + { + "bbox": [ + 69, + 645, + 538, + 668 + ], + "type": "text", + "content": "Follow the following guidance to think step by step before outlining the next action step at the current stage:" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 69, + 677, + 238, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 677, + 238, + 689 + ], + "spans": [ + { + "bbox": [ + 69, + 677, + 238, + 689 + ], + "type": "text", + "content": "(Current Webpage Identification)" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 69, + 689, + 326, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 689, + 326, + 700 + ], + "spans": [ + { + "bbox": [ + 69, + 689, + 326, + 700 + ], + "type": "text", + "content": "Firstly, think about what the current webpage is." 
+ } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 70, + 711, + 207, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 711, + 207, + 723 + ], + "spans": [ + { + "bbox": [ + 70, + 711, + 207, + 723 + ], + "type": "text", + "content": "(Previous Action Analysis)" + } + ] + } + ], + "index": 22 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "spans": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "text", + "content": "The Amazon Nova Family of Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 297, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 297, + 741, + 309, + 750 + ], + "type": "text", + "content": "36" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 35 + }, + { + "para_blocks": [ + { + "bbox": [ + 68, + 72, + 533, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 72, + 533, + 106 + ], + "spans": [ + { + "bbox": [ + 68, + 72, + 533, + 106 + ], + "type": "text", + "content": "Secondly, combined with the screenshot, analyze each step of the previous action history and their intention one by one. Particularly, pay more attention to the last step, which may be more related to what you should do now as the next step." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 70, + 116, + 222, + 128 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 116, + 222, + 128 + ], + "spans": [ + { + "bbox": [ + 70, + 116, + 222, + 128 + ], + "type": "text", + "content": "(Screenshot Details Analysis)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 128, + 532, + 204 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 128, + 532, + 204 + ], + "spans": [ + { + "bbox": [ + 69, + 128, + 532, + 204 + ], + "type": "text", + "content": "Closely examine the screenshot to check the status of every part of the webpage to understand what you can operate with and what has been set or completed. You should closely examine the screenshot details to see what steps have been completed by previous actions even though you are given the textual previous actions. Because the textual history may not clearly and sufficiently record some effects of previous actions, you should closely evaluate the status of every part of the webpage to understand what you have done." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 214, + 296, + 226 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 214, + 296, + 226 + ], + "spans": [ + { + "bbox": [ + 70, + 214, + 296, + 226 + ], + "type": "text", + "content": "(Next Action Based on Webpage and Analysis)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 226, + 537, + 270 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 226, + 537, + 270 + ], + "spans": [ + { + "bbox": [ + 69, + 226, + 537, + 270 + ], + "type": "text", + "content": "Then, based on your analysis, in conjunction with human web browsing habits and the logic of web design, decide on the following action. And clearly outline which element in the webpage users will operate with as the first next target element, its detailed location, and the corresponding operation." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 280, + 405, + 291 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 280, + 405, + 291 + ], + "spans": [ + { + "bbox": [ + 69, + 280, + 405, + 291 + ], + "type": "text", + "content": "To be successful, it is important to follow the following rules:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 292, + 436, + 312 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 70, + 292, + 436, + 302 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 292, + 436, + 302 + ], + "spans": [ + { + "bbox": [ + 70, + 292, + 436, + 302 + ], + "type": "text", + "content": "1. You should only issue a valid action given the current observation." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 303, + 311, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 303, + 311, + 312 + ], + "spans": [ + { + "bbox": [ + 70, + 303, + 311, + 312 + ], + "type": "text", + "content": "2. You should only issue one action at a time." + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 70, + 323, + 139, + 335 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 323, + 139, + 335 + ], + "spans": [ + { + "bbox": [ + 70, + 323, + 139, + 335 + ], + "type": "text", + "content": "(Reiteration)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 69, + 335, + 527, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 335, + 527, + 357 + ], + "spans": [ + { + "bbox": [ + 69, + 335, + 527, + 357 + ], + "type": "text", + "content": "First, reiterate your next target element, its detailed location, and the corresponding operation." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 70, + 367, + 186, + 378 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 367, + 186, + 378 + ], + "spans": [ + { + "bbox": [ + 70, + 367, + 186, + 378 + ], + "type": "text", + "content": "(Multichoice Question)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 69, + 378, + 537, + 434 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 378, + 537, + 434 + ], + "spans": [ + { + "bbox": [ + 69, + 378, + 537, + 434 + ], + "type": "text", + "content": "Below is a multi-choice question, where the choices are elements in the webpage. From the screenshot, find out where and what each one is on the webpage. Then, determine whether one matches your target element. Please examine the choices one by one. Choose the matching one. If multiple options match your answer, choose the most likely one by reexamining the screenshot, the choices, and your further reasoning." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 69, + 443, + 536, + 455 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 443, + 536, + 455 + ], + "spans": [ + { + "bbox": [ + 69, + 443, + 536, + 455 + ], + "type": "text", + "content": "If none of these elements match your target element, please select, select ." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 71, + 455, + 347, + 465 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 455, + 347, + 465 + ], + "spans": [ + { + "bbox": [ + 71, + 455, + 347, + 465 + ], + "type": "text", + "content": "None of the other options match the correct element." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 69, + 466, + 468, + 476 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 466, + 468, + 476 + ], + "spans": [ + { + "bbox": [ + 69, + 466, + 468, + 476 + ], + "type": "text", + "content": ". None of the other options match the correct element." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 69, + 487, + 537, + 532 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 487, + 537, + 532 + ], + "spans": [ + { + "bbox": [ + 69, + 487, + 537, + 532 + ], + "type": "text", + "content": "(Final Answer)Finally, conclude your answer using the format below. Ensure your answer is strictly adhering to the format provided below. Please do not leave any explanation in your answers of the final standardized format part, and this final part should be clear and certain. The element choice, action, and value should be in three separate lines." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 70, + 542, + 108, + 552 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 542, + 108, + 552 + ], + "spans": [ + { + "bbox": [ + 70, + 542, + 108, + 552 + ], + "type": "text", + "content": "Format:" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 69, + 563, + 306, + 575 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 563, + 306, + 575 + ], + "spans": [ + { + "bbox": [ + 69, + 563, + 306, + 575 + ], + "type": "text", + "content": "ELEMENT: The uppercase letter of your choice." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 69, + 585, + 522, + 608 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 585, + 522, + 608 + ], + "spans": [ + { + "bbox": [ + 69, + 585, + 522, + 608 + ], + "type": "text", + "content": "ACTION: Choose an action from {CLICK, TYPE, SELECT, NONE}. Use NONE only if you choose option F for the ELEMENT" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 69, + 618, + 321, + 629 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 618, + 321, + 629 + ], + "spans": [ + { + "bbox": [ + 69, + 618, + 321, + 629 + ], + "type": "text", + "content": "VALUE: Provide additional input based on ACTION." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 69, + 640, + 154, + 651 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 640, + 154, + 651 + ], + "spans": [ + { + "bbox": [ + 69, + 640, + 154, + 651 + ], + "type": "text", + "content": "The VALUE means:" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 69, + 651, + 321, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 651, + 321, + 662 + ], + "spans": [ + { + "bbox": [ + 69, + 651, + 321, + 662 + ], + "type": "text", + "content": "If ACTION == TYPE, specify the text to be typed." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 69, + 662, + 347, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 662, + 347, + 673 + ], + "spans": [ + { + "bbox": [ + 69, + 662, + 347, + 673 + ], + "type": "text", + "content": "If ACTION == SELECT, specify the option to be chosen." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 69, + 673, + 242, + 683 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 673, + 242, + 683 + ], + "spans": [ + { + "bbox": [ + 69, + 673, + 242, + 683 + ], + "type": "text", + "content": "If ACTION == CLICK, write \"None\"." 
+ } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 69, + 707, + 165, + 719 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 707, + 165, + 719 + ], + "spans": [ + { + "bbox": [ + 69, + 707, + 165, + 719 + ], + "type": "text", + "content": "B.2.7 GroundUI-1K" + } + ] + } + ], + "index": 26 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "spans": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "text", + "content": "The Amazon Nova Family of Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 297, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 297, + 741, + 309, + 750 + ], + "type": "text", + "content": "37" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 36 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 79, + 538, + 103 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 79, + 538, + 103 + ], + "spans": [ + { + "bbox": [ + 69, + 79, + 538, + 103 + ], + "type": "text", + "content": "Which action should I do if I want to Click on and where is the action? Express the location coordinates using the (x1, y1, x2, y2) format, scaled between 0 and 1000." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 124, + 196, + 136 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 124, + 196, + 136 + ], + "spans": [ + { + "bbox": [ + 69, + 124, + 196, + 136 + ], + "type": "text", + "content": "B.3 Functional Capabilities" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 144, + 133, + 156 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 144, + 133, + 156 + ], + "spans": [ + { + "bbox": [ + 69, + 144, + 133, + 156 + ], + "type": "text", + "content": "B.3.1 FinQA" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 164, + 538, + 209 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 164, + 538, + 209 + ], + "spans": [ + { + "bbox": [ + 69, + 164, + 538, + 209 + ], + "type": "text", + "content": "Given the following finance question, analyze the question in details step-by-step before giving the final answer. Your answer should begin with \"Lets think step-by-step\". Your response should end with \"The answer is [the_final_answer]\", where [the_final_answer] should be the most concise answer without any explanation." 
+ } + ] + } + ], + "index": 4 + }, + { + "type": "code", + "bbox": [ + 69, + 220, + 159, + 308 + ], + "blocks": [ + { + "bbox": [ + 69, + 220, + 159, + 308 + ], + "lines": [ + { + "bbox": [ + 69, + 220, + 159, + 308 + ], + "spans": [ + { + "bbox": [ + 69, + 220, + 159, + 308 + ], + "type": "text", + "content": "```java\n```java\n```java\n```java\n```java\n```java\n```java\n```java\n```java\n```java\n```java\n```java\n```java\n```java\n```java\n```java\n```java\n```java\n```java\n```java\n```java\n```java\n```java\n```java\n```java\n```java\n```java\n```java\n```java\n```java\n```java\n```java\n```java\n```\n```\n``" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "code_body" + } + ], + "index": 5, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 67, + 321, + 541, + 355 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 321, + 541, + 355 + ], + "spans": [ + { + "bbox": [ + 67, + 321, + 541, + 355 + ], + "type": "text", + "content": "We use regex \"The answer is (.*)\" to extract the answer. We convert answers with percent signs and magnitude terms to decimal numerical representation (e.g. convert " + }, + { + "bbox": [ + 67, + 321, + 541, + 355 + ], + "type": "inline_equation", + "content": "1.3\\%" + }, + { + "bbox": [ + 67, + 321, + 541, + 355 + ], + "type": "text", + "content": " to 0.013 and \"5.2 millions\" to 5,200,000). An answer is correct if it is identical to the ground truth when rounded to the same decimal places." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 365, + 126, + 376 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 365, + 126, + 376 + ], + "spans": [ + { + "bbox": [ + 69, + 365, + 126, + 376 + ], + "type": "text", + "content": "B.3.2 RAG" + } + ] + } + ], + "index": 7 + }, + { + "type": "code", + "bbox": [ + 69, + 387, + 538, + 639 + ], + "blocks": [ + { + "bbox": [ + 69, + 387, + 538, + 639 + ], + "lines": [ + { + "bbox": [ + 69, + 387, + 538, + 639 + ], + "spans": [ + { + "bbox": [ + 69, + 387, + 538, + 639 + ], + "type": "text", + "content": "You are a teacher grading a quiz. \nYou are given a question, the student's answer, and the true answer, and are asked to score the student answer as either Correct or Incorrect. \nExample Format: \nQUESTION: question here \nSTUDENT ANSWER: student's answer here \nTRUE ANSWER: true answer here \nGRADE: Correct or Incorrect here \nGrade the student answers based ONLY on their factual accuracy. Ignore differences in punctuation and phrasing between the student answer and true answer. It is OK if the student answer contains more information than the true answer, as long as it does not contain any conflicting statements. Begin! \nQUESTION: {query} \nSTUDENT ANSWER: {answer} \nTRUE ANSWER: {expected_answer} \nGRADE: \nYour response should be in json format as follows: \n{{ \"justification\": (Without mentioning the student/teacher framing of this prompt, explain why the STUDENT ANSWER is Correct or Incorrect. Use one or two sentences maximum. 
Keep the answer as concise as possible.), \"grade\": (correct or incorrect) \n}}" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "code_body" + } + ], + "index": 8, + "sub_type": "code", + "guess_lang": "txt" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "spans": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "text", + "content": "The Amazon Nova Family of Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 298, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 298, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 298, + 741, + 309, + 750 + ], + "type": "text", + "content": "38" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 37 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 71, + 337, + 86 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 71, + 337, + 86 + ], + "spans": [ + { + "bbox": [ + 69, + 71, + 337, + 86 + ], + "type": "text", + "content": "C Qualitative examples of multimodal intelligence" + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 83, + 110, + 97, + 124 + ], + "blocks": [ + { + "bbox": [ + 83, + 110, + 97, + 124 + ], + "lines": [ + { + "bbox": [ + 83, + 110, + 97, + 124 + ], + "spans": [ + { + "bbox": [ + 83, + 110, + 97, + 124 + ], + "type": "image", + "image_path": "b6fee84e3dec1c634c54caa6be6cd5718dbb5c0e2717596731f7713ff1f6cffc.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 102, + 111, + 157, + 125 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 102, + 111, + 157, + 125 + ], + "spans": [ + { + "bbox": [ + 102, + 111, + 157, + 125 + ], + "type": "text", + "content": "User input" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 80, + 141, + 287, + 176 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 141, + 287, + 176 + ], + "spans": [ + { + "bbox": [ + 80, + 141, + 287, + 176 + ], + "type": "text", + "content": "This looks like an amazing site. What is it and where is it located? Are there other fun things around this site to explore?" + } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 107, + 178, + 265, + 376 + ], + "blocks": [ + { + "bbox": [ + 107, + 178, + 265, + 376 + ], + "lines": [ + { + "bbox": [ + 107, + 178, + 265, + 376 + ], + "spans": [ + { + "bbox": [ + 107, + 178, + 265, + 376 + ], + "type": "image", + "image_path": "40ed9725a10bde768a14e4c23a1bbeaa48fa7bb2624851ec2ecc717d04925b21.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 171, + 384, + 437, + 397 + ], + "lines": [ + { + "bbox": [ + 171, + 384, + 437, + 397 + ], + "spans": [ + { + "bbox": [ + 171, + 384, + 437, + 397 + ], + "type": "text", + "content": "Figure 6: Created with Nova Pro. Photo taken by a team member." 
+ } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 319, + 110, + 332, + 124 + ], + "blocks": [ + { + "bbox": [ + 319, + 110, + 332, + 124 + ], + "lines": [ + { + "bbox": [ + 319, + 110, + 332, + 124 + ], + "spans": [ + { + "bbox": [ + 319, + 110, + 332, + 124 + ], + "type": "image", + "image_path": "8a0926b7ab13709954d88c169fa4b58825725dd4d4f17f64bb0c74906113cf3a.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 336, + 111, + 386, + 125 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 336, + 111, + 386, + 125 + ], + "spans": [ + { + "bbox": [ + 336, + 111, + 386, + 125 + ], + "type": "text", + "content": "Response" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 318, + 141, + 519, + 222 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 141, + 519, + 222 + ], + "spans": [ + { + "bbox": [ + 318, + 141, + 519, + 222 + ], + "type": "text", + "content": "This is the Space Needle, a landmark in Seattle, Washington, USA. It is a well-known observation tower and a symbol of Seattle. It was built for the 1962 World's Fair and has since become a popular tourist attraction. The Space Needle offers panoramic views of the city, the surrounding mountains, and the Puget Sound." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 318, + 223, + 526, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 223, + 526, + 373 + ], + "spans": [ + { + "bbox": [ + 318, + 223, + 526, + 373 + ], + "type": "text", + "content": "The Space Needle is located in the Seattle Center, which is a cultural and entertainment hub. The Seattle Center is home to many other attractions, including the Chihuly Garden and Glass, the Museum of Pop Culture, and the Pacific Science Center. The Seattle Center also hosts events like concerts, festivals, and art exhibitions. The Seattle Center is also close to other popular attractions in Seattle, such as the Pike Place Market, the Seattle Aquarium, and the Seattle Art Museum. The city is known for its vibrant arts and culture scene, outdoor activities, and beautiful natural surroundings." 
+ } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "spans": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "text", + "content": "The Amazon Nova Family of Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 297, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 297, + 741, + 309, + 750 + ], + "type": "text", + "content": "39" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 38 + }, + { + "para_blocks": [ + { + "bbox": [ + 80, + 123, + 332, + 135 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 123, + 332, + 135 + ], + "spans": [ + { + "bbox": [ + 80, + 123, + 332, + 135 + ], + "type": "text", + "content": "User input: \"Search for Psych and buy the first episode\"" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 75, + 152, + 128, + 161 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 152, + 128, + 161 + ], + "spans": [ + { + "bbox": [ + 75, + 152, + 128, + 161 + ], + "type": "text", + "content": "Agent step 1" + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 71, + 163, + 302, + 396 + ], + "blocks": [ + { + "bbox": [ + 71, + 163, + 302, + 396 + ], + "lines": [ + { + "bbox": [ + 71, + 163, + 302, + 396 + ], + "spans": [ + { + "bbox": [ + 71, + 163, + 302, + 396 + ], + "type": "image", + "image_path": "f62b7b8f0fa10516c22fcbbd7182f1adbd2b48c22b345d2038ebbd505fb4475e.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 314, + 152, + 368, + 161 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 152, + 368, + 161 + ], + "spans": [ + { + "bbox": [ + 314, + 152, + 368, + 161 + ], + "type": "text", + "content": "Agent step 2" + } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 310, + 163, + 541, + 395 + ], + "blocks": [ + { + "bbox": [ + 310, + 163, + 541, + 395 + ], + "lines": [ + { + "bbox": [ + 310, + 163, + 541, + 395 + ], + "spans": [ + { + "bbox": [ + 310, + 163, + 541, + 395 + ], + "type": "image", + "image_path": "40017c96e300bbe4f19d2bff766d3a5a22bc1aac134344ab06f81425b38ce48d.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 75, + 414, + 129, + 423 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 414, + 129, + 423 + ], + "spans": [ + { + "bbox": [ + 75, + 414, + 129, + 423 + ], + "type": "text", + "content": "Agent step 3" + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 72, + 426, + 302, + 657 + ], + "blocks": [ + { + "bbox": [ + 72, + 426, + 302, + 657 + ], + "lines": [ + { + "bbox": [ + 72, + 426, + 302, + 657 + ], + "spans": [ + { + "bbox": [ + 72, + 426, + 302, + 657 + ], + "type": "image", + "image_path": "e2b9071e056e37ac4eb9d430bd3f923d778aeea4fa88dcd30d9470cf968999bb.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 220, + 662, + 390, + 673 + ], + "lines": [ + { + "bbox": [ + 220, + 662, + 390, + 673 + ], + "spans": [ + { + "bbox": [ + 220, + 662, + 390, + 673 + ], + "type": "text", + "content": "Figure 7: Example of a multimodal agent." 
+ } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 314, + 414, + 369, + 423 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 414, + 369, + 423 + ], + "spans": [ + { + "bbox": [ + 314, + 414, + 369, + 423 + ], + "type": "text", + "content": "Agent step 4" + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 310, + 426, + 541, + 657 + ], + "blocks": [ + { + "bbox": [ + 310, + 426, + 541, + 657 + ], + "lines": [ + { + "bbox": [ + 310, + 426, + 541, + 657 + ], + "spans": [ + { + "bbox": [ + 310, + 426, + 541, + 657 + ], + "type": "image", + "image_path": "b32c411bb61018a8cbea165020cca783de3eec6926bb4ae55ba05b11185084fc.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "spans": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "text", + "content": "The Amazon Nova Family of Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 297, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 297, + 741, + 309, + 750 + ], + "type": "text", + "content": "40" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 39 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 83, + 263, + 96, + 277 + ], + "blocks": [ + { + "bbox": [ + 83, + 263, + 96, + 277 + ], + "lines": [ + { + "bbox": [ + 83, + 263, + 96, + 277 + ], + "spans": [ + { + "bbox": [ + 83, + 263, + 96, + 277 + ], + "type": "image", + "image_path": "d0909d37150b6e506c3322688439c181301d0021cd13c9fc9c174a56e6668fa5.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 102, + 264, + 155, + 277 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 102, + 264, + 155, + 277 + ], + "spans": [ + { + "bbox": [ + 102, + 264, + 155, + 277 + ], + "type": "text", + "content": "User input" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 81, + 293, + 189, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 293, + 189, + 316 + ], + "spans": [ + { + "bbox": [ + 81, + 293, + 189, + 316 + ], + "type": "text", + "content": "What happened to my tire? What should I do?" + } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 97, + 323, + 236, + 507 + ], + "blocks": [ + { + "bbox": [ + 97, + 323, + 236, + 507 + ], + "lines": [ + { + "bbox": [ + 97, + 323, + 236, + 507 + ], + "spans": [ + { + "bbox": [ + 97, + 323, + 236, + 507 + ], + "type": "image", + "image_path": "ac9cea2d55627a6fb2b6475d2b322f234e90b3b7231e196768db56042240a551.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 196, + 522, + 414, + 534 + ], + "lines": [ + { + "bbox": [ + 196, + 522, + 414, + 534 + ], + "spans": [ + { + "bbox": [ + 196, + 522, + 414, + 534 + ], + "type": "text", + "content": "Figure 8: Created with Nova Pro. Image source: [88]." 
+ } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 281, + 264, + 293, + 277 + ], + "blocks": [ + { + "bbox": [ + 281, + 264, + 293, + 277 + ], + "lines": [ + { + "bbox": [ + 281, + 264, + 293, + 277 + ], + "spans": [ + { + "bbox": [ + 281, + 264, + 293, + 277 + ], + "type": "image", + "image_path": "4182d1ad86670b1b607652228cf65789d3f9bb027a78f59a88228ade9b455e2d.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 297, + 265, + 345, + 277 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 265, + 345, + 277 + ], + "spans": [ + { + "bbox": [ + 297, + 265, + 345, + 277 + ], + "type": "text", + "content": "Response" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 280, + 294, + 508, + 327 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 280, + 294, + 508, + 327 + ], + "spans": [ + { + "bbox": [ + 280, + 294, + 508, + 327 + ], + "type": "text", + "content": "It appears that your tire has a significant puncture, as evidenced by the deflated state and visible damage. Here's what you should do:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 280, + 328, + 523, + 464 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 280, + 328, + 517, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 280, + 328, + 517, + 350 + ], + "spans": [ + { + "bbox": [ + 280, + 328, + 517, + 350 + ], + "type": "text", + "content": "1. Assess the Damage: Confirm the puncture by checking for any visible holes or debris embedded in the tire." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 280, + 351, + 520, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 280, + 351, + 520, + 361 + ], + "spans": [ + { + "bbox": [ + 280, + 351, + 520, + 361 + ], + "type": "text", + "content": "2. Check Tire Pressure: Ensure the tire is completely deflated." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 280, + 361, + 523, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 280, + 361, + 523, + 384 + ], + "spans": [ + { + "bbox": [ + 280, + 361, + 523, + 384 + ], + "type": "text", + "content": "3. Remove the Tire: If you're comfortable, you can remove the wheel to inspect the puncture more closely." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 280, + 384, + 515, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 280, + 384, + 515, + 407 + ], + "spans": [ + { + "bbox": [ + 280, + 384, + 515, + 407 + ], + "type": "text", + "content": "4. Temporary Fix: Use a tire repair kit to patch the hole if it's small. This is a temporary solution." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 280, + 407, + 523, + 440 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 280, + 407, + 523, + 440 + ], + "spans": [ + { + "bbox": [ + 280, + 407, + 523, + 440 + ], + "type": "text", + "content": "5. Professional Repair: Visit a tire shop for a proper repair or replacement. They can assess if the tire can be patched or if a new tire is needed." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 280, + 441, + 514, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 280, + 441, + 514, + 464 + ], + "spans": [ + { + "bbox": [ + 280, + 441, + 514, + 464 + ], + "type": "text", + "content": "6. Inspect Other Tires: Check the other tires for any signs of damage or wear." 
+ } + ] + } + ], + "index": 13 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 280, + 464, + 503, + 486 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 280, + 464, + 503, + 486 + ], + "spans": [ + { + "bbox": [ + 280, + 464, + 503, + 486 + ], + "type": "text", + "content": "Would you like more detailed instructions on any of these steps?" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "spans": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "text", + "content": "The Amazon Nova Family of Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 297, + 741, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 741, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 297, + 741, + 308, + 750 + ], + "type": "text", + "content": "41" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 40 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 83, + 79, + 97, + 94 + ], + "blocks": [ + { + "bbox": [ + 83, + 79, + 97, + 94 + ], + "lines": [ + { + "bbox": [ + 83, + 79, + 97, + 94 + ], + "spans": [ + { + "bbox": [ + 83, + 79, + 97, + 94 + ], + "type": "image", + "image_path": "11c83a4fc91e912143be2cb657581d8025a865186a75371afd4135014166cf8f.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 102, + 80, + 157, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 102, + 80, + 157, + 94 + ], + "spans": [ + { + "bbox": [ + 102, + 80, + 157, + 94 + ], + "type": "text", + "content": "User input" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 80, + 110, + 277, + 134 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 110, + 277, + 134 + ], + "spans": [ + { + "bbox": [ + 80, + 110, + 277, + 134 + ], + "type": "text", + "content": "Group this sales data by month using pandas and then create a bar chart of the sales per month." + } + ] + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 134, + 138, + 239, + 277 + ], + "blocks": [ + { + "bbox": [ + 134, + 138, + 239, + 277 + ], + "lines": [ + { + "bbox": [ + 134, + 138, + 239, + 277 + ], + "spans": [ + { + "bbox": [ + 134, + 138, + 239, + 277 + ], + "type": "table", + "html": "
<table><tr><td>Month</td><td>Sales</td></tr>
<tr><td>Jan</td><td>1523.13</td></tr>
<tr><td>Jan</td><td>957</td></tr>
<tr><td>Feb</td><td>133</td></tr>
<tr><td>March</td><td>452</td></tr>
<tr><td>May</td><td>521</td></tr>
<tr><td>June</td><td>901</td></tr>
<tr><td>July</td><td>1334</td></tr>
<tr><td>July</td><td>131</td></tr>
<tr><td>Aug</td><td>2601</td></tr>
<tr><td>Dec</td><td>3005</td></tr></table>
", + "image_path": "e89c885cecd0fdc170940dc1b4a7578c536bfa8bd858aa091078589ec47fa28a.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 97, + 308, + 275, + 420 + ], + "blocks": [ + { + "bbox": [ + 104, + 293, + 170, + 305 + ], + "lines": [ + { + "bbox": [ + 104, + 293, + 170, + 305 + ], + "spans": [ + { + "bbox": [ + 104, + 293, + 170, + 305 + ], + "type": "text", + "content": "Generated plot:" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 97, + 308, + 275, + 420 + ], + "lines": [ + { + "bbox": [ + 97, + 308, + 275, + 420 + ], + "spans": [ + { + "bbox": [ + 97, + 308, + 275, + 420 + ], + "type": "image", + "image_path": "14520d6be03d8ce44c8e422a387e8ae19c3963854102147343ff7ee1426a2348.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 320, + 80, + 332, + 93 + ], + "blocks": [ + { + "bbox": [ + 320, + 80, + 332, + 93 + ], + "lines": [ + { + "bbox": [ + 320, + 80, + 332, + 93 + ], + "spans": [ + { + "bbox": [ + 320, + 80, + 332, + 93 + ], + "type": "image", + "image_path": "aa02909025c79a2e3bc10826fb45b862b4bcc1dd1005746e68c4bf07a0a94712.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 336, + 81, + 386, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 336, + 81, + 386, + 94 + ], + "spans": [ + { + "bbox": [ + 336, + 81, + 386, + 94 + ], + "type": "text", + "content": "Response" + } + ] + } + ], + "index": 8 + }, + { + "type": "code", + "bbox": [ + 318, + 108, + 529, + 352 + ], + "blocks": [ + { + "bbox": [ + 318, + 108, + 529, + 352 + ], + "lines": [ + { + "bbox": [ + 318, + 108, + 529, + 352 + ], + "spans": [ + { + "bbox": [ + 318, + 108, + 529, + 352 + ], + "type": "text", + "content": "import pandas as pd \nimport matplotlib.pyplot as plt \n# Data \ndata " + }, + { + "bbox": [ + 318, + 108, + 529, + 352 + ], + "type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 318, + 108, + 529, + 352 + ], + "type": "text", + "content": " { Month':['Jan','Jan','Feb','March','May','June','July' July','Aug','Dec'], Sales':[1523.13,957,133,452,521,901,1334,131,2601,3005] } \n# Create DataFrame df " + }, + { + "bbox": [ + 318, + 108, + 529, + 352 + ], + "type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 318, + 108, + 529, + 352 + ], + "type": "text", + "content": " pd.DataFrame(data) \n# Group by month and sum sales monthly_sales " + }, + { + "bbox": [ + 318, + 108, + 529, + 352 + ], + "type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 318, + 108, + 529, + 352 + ], + "type": "text", + "content": " df.groupby('Month')[Sales'].sum().reset_index() \n# Sort months based on calendar monthly_sales['Month'] " + }, + { + "bbox": [ + 318, + 108, + 529, + 352 + ], + "type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 318, + 108, + 529, + 352 + ], + "type": "text", + "content": " pd.Categorical( monthly_sales['Month'], categories " + }, + { + "bbox": [ + 318, + 108, + 529, + 352 + ], + "type": "inline_equation", + "content": "\\coloneqq" + }, + { + "bbox": [ + 318, + 108, + 529, + 352 + ], + "type": "text", + "content": " ['Jan','Feb','March','April','May','June','July', Aug,'Sept','Oct','Nov','Dec'], ordered=True \nmonthsales " + }, + { + "bbox": [ + 318, + 108, + 529, + 352 + ], + "type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 
318, + 108, + 529, + 352 + ], + "type": "text", + "content": " monthly_sales.sort_values('Month') \n# Plot bar chart plt.figure(figsize=(10,6)) plt.bar(monthly_sales['Month'], monthly_sales['Sales'], color " + }, + { + "bbox": [ + 318, + 108, + 529, + 352 + ], + "type": "inline_equation", + "content": "\\equiv" + }, + { + "bbox": [ + 318, + 108, + 529, + 352 + ], + "type": "text", + "content": " 'blue') plt.xlabel('Month') pltylabel('Total Sales') plt.title('Total Sales per Month') plt.show()" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "code_body" + } + ], + "index": 9, + "sub_type": "code", + "guess_lang": "python" + }, + { + "type": "image", + "bbox": [ + 83, + 456, + 97, + 471 + ], + "blocks": [ + { + "bbox": [ + 83, + 456, + 97, + 471 + ], + "lines": [ + { + "bbox": [ + 83, + 456, + 97, + 471 + ], + "spans": [ + { + "bbox": [ + 83, + 456, + 97, + 471 + ], + "type": "image", + "image_path": "ca75d134a16e3155d8778b1af440402be2f8402314bc6cd2b56a644be95182cb.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 102, + 457, + 157, + 471 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 102, + 457, + 157, + 471 + ], + "spans": [ + { + "bbox": [ + 102, + 457, + 157, + 471 + ], + "type": "text", + "content": "User input" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 80, + 487, + 251, + 511 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 487, + 251, + 511 + ], + "spans": [ + { + "bbox": [ + 80, + 487, + 251, + 511 + ], + "type": "text", + "content": "How many people say the European Union responded well to the pandemic?" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 108, + 529, + 252, + 548 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 529, + 252, + 548 + ], + "spans": [ + { + "bbox": [ + 108, + 529, + 252, + 548 + ], + "type": "text", + "content": "Poor marks for America's response to coronavirus outbreak" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 108, + 551, + 247, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 551, + 247, + 567 + ], + "spans": [ + { + "bbox": [ + 108, + 551, + 247, + 567 + ], + "type": "text", + "content": "% who say each has done a job dealing with the coronavirus outbreak" + } + ] + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 109, + 570, + 260, + 636 + ], + "blocks": [ + { + "bbox": [ + 109, + 570, + 260, + 636 + ], + "lines": [ + { + "bbox": [ + 109, + 570, + 260, + 636 + ], + "spans": [ + { + "bbox": [ + 109, + 570, + 260, + 636 + ], + "type": "image", + "image_path": "e537c2f1ffe8ae9930b85ad5babd789496b6b51b9653a1c9bf7e807992b5257a.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 157, + 707, + 452, + 719 + ], + "lines": [ + { + "bbox": [ + 157, + 707, + 452, + 719 + ], + "spans": [ + { + "bbox": [ + 157, + 707, + 452, + 719 + ], + "type": "text", + "content": "Figure 10: Created with Nova Lite. Image source: ChartQA dataset [50]." + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "bbox": [ + 108, + 639, + 259, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 639, + 259, + 673 + ], + "spans": [ + { + "bbox": [ + 108, + 639, + 259, + 673 + ], + "type": "text", + "content": "Note: Percentages are medians based on 13 countries. Those who did not answer are not shown. Source: Summer 2020 Global Attitudes Survey. 
Q10a-e. U.S. Image Plummets Internationally as Most Say Country Has Handled Coronavirus Badly" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 109, + 675, + 167, + 682 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 109, + 675, + 167, + 682 + ], + "spans": [ + { + "bbox": [ + 109, + 675, + 167, + 682 + ], + "type": "text", + "content": "PEW RESEARCH CENTER" + } + ] + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 320, + 457, + 332, + 471 + ], + "blocks": [ + { + "bbox": [ + 236, + 427, + 373, + 439 + ], + "lines": [ + { + "bbox": [ + 236, + 427, + 373, + 439 + ], + "spans": [ + { + "bbox": [ + 236, + 427, + 373, + 439 + ], + "type": "text", + "content": "Figure 9: Created with Nova Pro." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 320, + 457, + 332, + 471 + ], + "lines": [ + { + "bbox": [ + 320, + 457, + 332, + 471 + ], + "spans": [ + { + "bbox": [ + 320, + 457, + 332, + 471 + ], + "type": "image", + "image_path": "ac91511bde96ff7f67aab4256d93ae2e68faf1e7ea3de954ee8f09249bf14491.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "bbox": [ + 336, + 458, + 386, + 472 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 336, + 458, + 386, + 472 + ], + "spans": [ + { + "bbox": [ + 336, + 458, + 386, + 472 + ], + "type": "text", + "content": "Response" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 318, + 487, + 526, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 487, + 526, + 568 + ], + "spans": [ + { + "bbox": [ + 318, + 487, + 526, + 568 + ], + "type": "text", + "content": "The chart shows that " + }, + { + "bbox": [ + 318, + 487, + 526, + 568 + ], + "type": "inline_equation", + "content": "57\\%" + }, + { + "bbox": [ + 318, + 487, + 526, + 568 + ], + "type": "text", + "content": " of people say the European Union responded well to the pandemic. This is indicated by the green bar labeled \"Good\" for the EU, which extends to a height of " + }, + { + "bbox": [ + 318, + 487, + 526, + 568 + ], + "type": "inline_equation", + "content": "57\\%" + }, + { + "bbox": [ + 318, + 487, + 526, + 568 + ], + "type": "text", + "content": ". This suggests that a majority of the people surveyed had a positive view of the EU's response to the pandemic." 
+ } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "spans": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "text", + "content": "The Amazon Nova Family of Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 297, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 297, + 741, + 309, + 750 + ], + "type": "text", + "content": "42" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 41 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 71, + 269, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 71, + 269, + 85 + ], + "spans": [ + { + "bbox": [ + 69, + 71, + 269, + 85 + ], + "type": "text", + "content": "D Correspondence and Contributors" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 95, + 217, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 95, + 217, + 106 + ], + "spans": [ + { + "bbox": [ + 69, + 95, + 217, + 106 + ], + "type": "text", + "content": "Please direct all correspondences to:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 210, + 119, + 400, + 132 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 210, + 119, + 400, + 132 + ], + "spans": [ + { + "bbox": [ + 210, + 119, + 400, + 132 + ], + "type": "text", + "content": "nova-technical-report@amazon.com" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 144, + 541, + 166 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 144, + 541, + 166 + ], + "spans": [ + { + "bbox": [ + 67, + 144, + 541, + 166 + ], + "type": "text", + "content": "The Nova family of models were built by the Amazon Artificial General Intelligence (AGI) organization and partner teams." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 68, + 171, + 491, + 184 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 171, + 491, + 184 + ], + "spans": [ + { + "bbox": [ + 68, + 171, + 491, + 184 + ], + "type": "text", + "content": "When citing this report, please use \"Amazon AGI\" as the sole author, as shown in the bibtex entry below." 
+ } + ] + } + ], + "index": 5 + }, + { + "type": "code", + "bbox": [ + 69, + 189, + 499, + 266 + ], + "blocks": [ + { + "bbox": [ + 69, + 189, + 499, + 266 + ], + "lines": [ + { + "bbox": [ + 69, + 189, + 499, + 266 + ], + "spans": [ + { + "bbox": [ + 69, + 189, + 499, + 266 + ], + "type": "text", + "content": "@misc{novatechreport, author = {Amazon AGI}, title = {The Amazon Nova Family of Models: Technical Report and Model Card}, year = {2024}, url = {https://www.amazon.science/publications/the-amazon-nova-family-of-models-technical-report-and-model-card} }" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "code_body" + } + ], + "index": 6, + "sub_type": "code", + "guess_lang": "bib" + }, + { + "bbox": [ + 69, + 289, + 153, + 300 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 289, + 153, + 300 + ], + "spans": [ + { + "bbox": [ + 69, + 289, + 153, + 300 + ], + "type": "text", + "content": "D.1 Contributors" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 68, + 308, + 541, + 331 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 308, + 541, + 331 + ], + "spans": [ + { + "bbox": [ + 68, + 308, + 541, + 331 + ], + "type": "text", + "content": "The following individuals worked in the Nova program for at least one-fifth of its duration and measurably impacted one or more of the models or services described in this report." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 342, + 137, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 342, + 137, + 354 + ], + "spans": [ + { + "bbox": [ + 69, + 342, + 137, + 354 + ], + "type": "text", + "content": "Aaron Langford" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 354, + 124, + 364 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 354, + 124, + 364 + ], + "spans": [ + { + "bbox": [ + 69, + 354, + 124, + 364 + ], + "type": "text", + "content": "Aayush Shah" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 70, + 365, + 138, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 365, + 138, + 376 + ], + "spans": [ + { + "bbox": [ + 70, + 365, + 138, + 376 + ], + "type": "text", + "content": "Abhanshu Gupta" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 70, + 376, + 150, + 386 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 376, + 150, + 386 + ], + "spans": [ + { + "bbox": [ + 70, + 376, + 150, + 386 + ], + "type": "text", + "content": "Abhimanyu Bhatter" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 70, + 387, + 132, + 397 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 387, + 132, + 397 + ], + "spans": [ + { + "bbox": [ + 70, + 387, + 132, + 397 + ], + "type": "text", + "content": "Abhinav Goyal" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 70, + 398, + 138, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 398, + 138, + 407 + ], + "spans": [ + { + "bbox": [ + 70, + 398, + 138, + 407 + ], + "type": "text", + "content": "Abhinav Mathur" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 70, + 409, + 144, + 419 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 409, + 144, + 419 + ], + "spans": [ + { + "bbox": [ + 70, + 409, + 144, + 419 + ], + "type": "text", + "content": "Abhinav Mohanty" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 70, + 420, + 140, + 429 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 420, + 140, + 429 + 
], + "spans": [ + { + "bbox": [ + 70, + 420, + 140, + 429 + ], + "type": "text", + "content": "Abhishek Kumar" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 70, + 430, + 132, + 439 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 430, + 132, + 439 + ], + "spans": [ + { + "bbox": [ + 70, + 430, + 132, + 439 + ], + "type": "text", + "content": "Abhishek Sethi" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 70, + 441, + 121, + 451 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 441, + 121, + 451 + ], + "spans": [ + { + "bbox": [ + 70, + 441, + 121, + 451 + ], + "type": "text", + "content": "Abi Komma" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 70, + 453, + 118, + 461 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 453, + 118, + 461 + ], + "spans": [ + { + "bbox": [ + 70, + 453, + 118, + 461 + ], + "type": "text", + "content": "Abner Pena" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 70, + 463, + 114, + 472 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 463, + 114, + 472 + ], + "spans": [ + { + "bbox": [ + 70, + 463, + 114, + 472 + ], + "type": "text", + "content": "Achin Jain" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 70, + 475, + 129, + 484 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 475, + 129, + 484 + ], + "spans": [ + { + "bbox": [ + 70, + 475, + 129, + 484 + ], + "type": "text", + "content": "Adam Kunysz" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 70, + 485, + 135, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 485, + 135, + 495 + ], + "spans": [ + { + "bbox": [ + 70, + 485, + 135, + 495 + ], + "type": "text", + "content": "Adam Opyrchal" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 70, + 496, + 126, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 496, + 126, + 506 + ], + "spans": [ + { + "bbox": [ + 70, + 496, + 126, + 506 + ], + "type": "text", + "content": "Adarsh Singh" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 70, + 507, + 126, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 507, + 126, + 517 + ], + "spans": [ + { + "bbox": [ + 70, + 507, + 126, + 517 + ], + "type": "text", + "content": "Aditya Rawal" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 70, + 517, + 183, + 527 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 517, + 183, + 527 + ], + "spans": [ + { + "bbox": [ + 70, + 517, + 183, + 527 + ], + "type": "text", + "content": "Adok Achar Budihal Prasad" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 70, + 529, + 138, + 538 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 529, + 138, + 538 + ], + "spans": [ + { + "bbox": [ + 70, + 529, + 138, + 538 + ], + "type": "text", + "content": "Adrià de Gispert" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 70, + 540, + 130, + 550 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 540, + 130, + 550 + ], + "spans": [ + { + "bbox": [ + 70, + 540, + 130, + 550 + ], + "type": "text", + "content": "Agnika Kumar" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 70, + 551, + 159, + 561 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 551, + 159, + 561 + ], + "spans": [ + { + "bbox": [ + 70, + 551, + 159, + 561 + ], + "type": "text", + "content": "Aishwarya Aryamane" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 
70, + 562, + 111, + 572 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 562, + 111, + 572 + ], + "spans": [ + { + "bbox": [ + 70, + 562, + 111, + 572 + ], + "type": "text", + "content": "Ajay Nair" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 70, + 573, + 110, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 573, + 110, + 582 + ], + "spans": [ + { + "bbox": [ + 70, + 573, + 110, + 582 + ], + "type": "text", + "content": "Akilan M" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 70, + 584, + 139, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 584, + 139, + 594 + ], + "spans": [ + { + "bbox": [ + 70, + 584, + 139, + 594 + ], + "type": "text", + "content": "Akshaya Iyengar" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 70, + 594, + 216, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 594, + 216, + 605 + ], + "spans": [ + { + "bbox": [ + 70, + 594, + 216, + 605 + ], + "type": "text", + "content": "Akshaya Vishnu Kudlu Shanbhogue" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 70, + 605, + 105, + 614 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 605, + 105, + 614 + ], + "spans": [ + { + "bbox": [ + 70, + 605, + 105, + 614 + ], + "type": "text", + "content": "Alan He" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 70, + 616, + 152, + 625 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 616, + 152, + 625 + ], + "spans": [ + { + "bbox": [ + 70, + 616, + 152, + 625 + ], + "type": "text", + "content": "Alessandra Cervone" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 70, + 627, + 114, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 627, + 114, + 635 + ], + "spans": [ + { + "bbox": [ + 70, + 627, + 114, + 635 + ], + "type": "text", + "content": "Alex Loeb" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 70, + 637, + 119, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 637, + 119, + 647 + ], + "spans": [ + { + "bbox": [ + 70, + 637, + 119, + 647 + ], + "type": "text", + "content": "Alex Zhang" + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 70, + 649, + 126, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 649, + 126, + 658 + ], + "spans": [ + { + "bbox": [ + 70, + 649, + 126, + 658 + ], + "type": "text", + "content": "Alexander Fu" + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 70, + 659, + 164, + 669 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 659, + 164, + 669 + ], + "spans": [ + { + "bbox": [ + 70, + 659, + 164, + 669 + ], + "type": "text", + "content": "Alexander Lisnichenko" + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 70, + 670, + 138, + 681 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 670, + 138, + 681 + ], + "spans": [ + { + "bbox": [ + 70, + 670, + 138, + 681 + ], + "type": "text", + "content": "Alexander Zhipa" + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 70, + 682, + 166, + 691 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 682, + 166, + 691 + ], + "spans": [ + { + "bbox": [ + 70, + 682, + 166, + 691 + ], + "type": "text", + "content": "Alexandros Potamianos" + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 70, + 693, + 138, + 703 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 693, + 138, + 703 + ], + "spans": [ + { + "bbox": [ 
+ 70, + 693, + 138, + 703 + ], + "type": "text", + "content": "Ali Kebarighotbi" + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 70, + 704, + 157, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 704, + 157, + 712 + ], + "spans": [ + { + "bbox": [ + 70, + 704, + 157, + 712 + ], + "type": "text", + "content": "Aliakbar Daronkolaei" + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 229, + 342, + 288, + 353 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 229, + 342, + 288, + 353 + ], + "spans": [ + { + "bbox": [ + 229, + 342, + 288, + 353 + ], + "type": "text", + "content": "Alok Parmesh" + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 229, + 354, + 316, + 364 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 229, + 354, + 316, + 364 + ], + "spans": [ + { + "bbox": [ + 229, + 354, + 316, + 364 + ], + "type": "text", + "content": "Amanjot Kaur Samra" + } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 230, + 365, + 283, + 374 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 365, + 283, + 374 + ], + "spans": [ + { + "bbox": [ + 230, + 365, + 283, + 374 + ], + "type": "text", + "content": "Ameen Khan" + } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 230, + 376, + 271, + 385 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 376, + 271, + 385 + ], + "spans": [ + { + "bbox": [ + 230, + 376, + 271, + 385 + ], + "type": "text", + "content": "Amer Rez" + } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 230, + 387, + 281, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 387, + 281, + 396 + ], + "spans": [ + { + "bbox": [ + 230, + 387, + 281, + 396 + ], + "type": "text", + "content": "Amir Saffari" + } + ] + } + ], + "index": 47 + }, + { + "bbox": [ + 230, + 398, + 294, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 398, + 294, + 407 + ], + "spans": [ + { + "bbox": [ + 230, + 398, + 294, + 407 + ], + "type": "text", + "content": "Amit Agarwalla" + } + ] + } + ], + "index": 48 + }, + { + "bbox": [ + 230, + 409, + 282, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 409, + 282, + 418 + ], + "spans": [ + { + "bbox": [ + 230, + 409, + 282, + 418 + ], + "type": "text", + "content": "Amit Jhindal" + } + ] + } + ], + "index": 49 + }, + { + "bbox": [ + 230, + 419, + 299, + 429 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 419, + 299, + 429 + ], + "spans": [ + { + "bbox": [ + 230, + 419, + 299, + 429 + ], + "type": "text", + "content": "Amith Mamidala" + } + ] + } + ], + "index": 50 + }, + { + "bbox": [ + 230, + 430, + 290, + 439 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 430, + 290, + 439 + ], + "spans": [ + { + "bbox": [ + 230, + 430, + 290, + 439 + ], + "type": "text", + "content": "Ammar Asmro" + } + ] + } + ], + "index": 51 + }, + { + "bbox": [ + 230, + 441, + 300, + 451 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 441, + 300, + 451 + ], + "spans": [ + { + "bbox": [ + 230, + 441, + 300, + 451 + ], + "type": "text", + "content": "Amulya Ballakur" + } + ] + } + ], + "index": 52 + }, + { + "bbox": [ + 230, + 453, + 288, + 461 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 453, + 288, + 461 + ], + "spans": [ + { + "bbox": [ + 230, + 453, + 288, + 461 + ], + "type": "text", + "content": "Anand Mishra" + } + ] + } + ], + "index": 53 + }, + { + "bbox": [ + 
230, + 463, + 298, + 472 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 463, + 298, + 472 + ], + "spans": [ + { + "bbox": [ + 230, + 463, + 298, + 472 + ], + "type": "text", + "content": "Anand Sridharan" + } + ] + } + ], + "index": 54 + }, + { + "bbox": [ + 230, + 475, + 312, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 475, + 312, + 483 + ], + "spans": [ + { + "bbox": [ + 230, + 475, + 312, + 483 + ], + "type": "text", + "content": "Anastasiia Dubinina" + } + ] + } + ], + "index": 55 + }, + { + "bbox": [ + 230, + 485, + 279, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 485, + 279, + 495 + ], + "spans": [ + { + "bbox": [ + 230, + 485, + 279, + 495 + ], + "type": "text", + "content": "Andre Lenz" + } + ] + } + ], + "index": 56 + }, + { + "bbox": [ + 230, + 496, + 290, + 505 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 496, + 290, + 505 + ], + "spans": [ + { + "bbox": [ + 230, + 496, + 290, + 505 + ], + "type": "text", + "content": "Andreas Doerr" + } + ] + } + ], + "index": 57 + }, + { + "bbox": [ + 230, + 507, + 296, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 507, + 296, + 517 + ], + "spans": [ + { + "bbox": [ + 230, + 507, + 296, + 517 + ], + "type": "text", + "content": "Andrew Keating" + } + ] + } + ], + "index": 58 + }, + { + "bbox": [ + 230, + 518, + 292, + 527 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 518, + 292, + 527 + ], + "spans": [ + { + "bbox": [ + 230, + 518, + 292, + 527 + ], + "type": "text", + "content": "Andrew Leaver" + } + ] + } + ], + "index": 59 + }, + { + "bbox": [ + 230, + 529, + 289, + 538 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 529, + 289, + 538 + ], + "spans": [ + { + "bbox": [ + 230, + 529, + 289, + 538 + ], + "type": "text", + "content": "Andrew Smith" + } + ] + } + ], + "index": 60 + }, + { + "bbox": [ + 230, + 539, + 288, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 539, + 288, + 548 + ], + "spans": [ + { + "bbox": [ + 230, + 539, + 288, + 548 + ], + "type": "text", + "content": "Andrew Wirth" + } + ] + } + ], + "index": 61 + }, + { + "bbox": [ + 230, + 551, + 280, + 560 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 551, + 280, + 560 + ], + "spans": [ + { + "bbox": [ + 230, + 551, + 280, + 560 + ], + "type": "text", + "content": "Andy Davey" + } + ] + } + ], + "index": 62 + }, + { + "bbox": [ + 230, + 562, + 302, + 571 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 562, + 302, + 571 + ], + "spans": [ + { + "bbox": [ + 230, + 562, + 302, + 571 + ], + "type": "text", + "content": "Andy Rosenbaum" + } + ] + } + ], + "index": 63 + }, + { + "bbox": [ + 230, + 572, + 276, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 572, + 276, + 582 + ], + "spans": [ + { + "bbox": [ + 230, + 572, + 276, + 582 + ], + "type": "text", + "content": "Andy Sohn" + } + ] + } + ], + "index": 64 + }, + { + "bbox": [ + 230, + 583, + 283, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 583, + 283, + 594 + ], + "spans": [ + { + "bbox": [ + 230, + 583, + 283, + 594 + ], + "type": "text", + "content": "Angela Chan" + } + ] + } + ], + "index": 65 + }, + { + "bbox": [ + 230, + 594, + 307, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 594, + 307, + 604 + ], + "spans": [ + 
{ + "bbox": [ + 230, + 594, + 307, + 604 + ], + "type": "text", + "content": "Aniket Chakrabarti" + } + ] + } + ], + "index": 66 + }, + { + "bbox": [ + 230, + 605, + 304, + 614 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 605, + 304, + 614 + ], + "spans": [ + { + "bbox": [ + 230, + 605, + 304, + 614 + ], + "type": "text", + "content": "Anil Ramakrishna" + } + ] + } + ], + "index": 67 + }, + { + "bbox": [ + 230, + 616, + 282, + 626 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 616, + 282, + 626 + ], + "spans": [ + { + "bbox": [ + 230, + 616, + 282, + 626 + ], + "type": "text", + "content": "Anirban Roy" + } + ] + } + ], + "index": 68 + }, + { + "bbox": [ + 230, + 628, + 271, + 637 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 628, + 271, + 637 + ], + "spans": [ + { + "bbox": [ + 230, + 628, + 271, + 637 + ], + "type": "text", + "content": "Anita Iyer" + } + ] + } + ], + "index": 69 + }, + { + "bbox": [ + 230, + 638, + 316, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 638, + 316, + 647 + ], + "spans": [ + { + "bbox": [ + 230, + 638, + 316, + 647 + ], + "type": "text", + "content": "Anjali Narayan-Chen" + } + ] + } + ], + "index": 70 + }, + { + "bbox": [ + 230, + 649, + 286, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 649, + 286, + 658 + ], + "spans": [ + { + "bbox": [ + 230, + 649, + 286, + 658 + ], + "type": "text", + "content": "Ankith Yennu" + } + ] + } + ], + "index": 71 + }, + { + "bbox": [ + 230, + 659, + 300, + 669 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 659, + 300, + 669 + ], + "spans": [ + { + "bbox": [ + 230, + 659, + 300, + 669 + ], + "type": "text", + "content": "Anna Dabrowska" + } + ] + } + ], + "index": 72 + }, + { + "bbox": [ + 230, + 670, + 302, + 680 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 670, + 302, + 680 + ], + "spans": [ + { + "bbox": [ + 230, + 670, + 302, + 680 + ], + "type": "text", + "content": "Anna Gawlowska" + } + ] + } + ], + "index": 73 + }, + { + "bbox": [ + 230, + 681, + 299, + 692 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 681, + 299, + 692 + ], + "spans": [ + { + "bbox": [ + 230, + 681, + 299, + 692 + ], + "type": "text", + "content": "Anna Rumshisky" + } + ] + } + ], + "index": 74 + }, + { + "bbox": [ + 230, + 693, + 278, + 701 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 693, + 278, + 701 + ], + "spans": [ + { + "bbox": [ + 230, + 693, + 278, + 701 + ], + "type": "text", + "content": "Anna Turek" + } + ] + } + ], + "index": 75 + }, + { + "bbox": [ + 230, + 703, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 703, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 230, + 703, + 288, + 714 + ], + "type": "text", + "content": "Anoop Deoras" + } + ] + } + ], + "index": 76 + }, + { + "bbox": [ + 389, + 342, + 463, + 353 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 342, + 463, + 353 + ], + "spans": [ + { + "bbox": [ + 389, + 342, + 463, + 353 + ], + "type": "text", + "content": "Anton Bezruchkin" + } + ] + } + ], + "index": 77 + }, + { + "bbox": [ + 389, + 354, + 441, + 364 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 354, + 441, + 364 + ], + "spans": [ + { + "bbox": [ + 389, + 354, + 441, + 364 + ], + "type": "text", + "content": "Anup Prasad" + } + ] + } + ], + "index": 78 + 
}, + { + "bbox": [ + 389, + 365, + 455, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 365, + 455, + 376 + ], + "spans": [ + { + "bbox": [ + 389, + 365, + 455, + 376 + ], + "type": "text", + "content": "Anupam Dewan" + } + ] + } + ], + "index": 79 + }, + { + "bbox": [ + 389, + 376, + 445, + 385 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 376, + 445, + 385 + ], + "spans": [ + { + "bbox": [ + 389, + 376, + 445, + 385 + ], + "type": "text", + "content": "Anwith Kiran" + } + ] + } + ], + "index": 80 + }, + { + "bbox": [ + 389, + 387, + 447, + 397 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 387, + 447, + 397 + ], + "spans": [ + { + "bbox": [ + 389, + 387, + 447, + 397 + ], + "type": "text", + "content": "Apoory Gupta" + } + ] + } + ], + "index": 81 + }, + { + "bbox": [ + 389, + 398, + 450, + 408 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 398, + 450, + 408 + ], + "spans": [ + { + "bbox": [ + 389, + 398, + 450, + 408 + ], + "type": "text", + "content": "Aram Galstyan" + } + ] + } + ], + "index": 82 + }, + { + "bbox": [ + 389, + 409, + 470, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 409, + 470, + 418 + ], + "spans": [ + { + "bbox": [ + 389, + 409, + 470, + 418 + ], + "type": "text", + "content": "Aravind Manoharan" + } + ] + } + ], + "index": 83 + }, + { + "bbox": [ + 389, + 419, + 443, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 419, + 443, + 430 + ], + "spans": [ + { + "bbox": [ + 389, + 419, + 443, + 430 + ], + "type": "text", + "content": "Arijit Biswas" + } + ] + } + ], + "index": 84 + }, + { + "bbox": [ + 389, + 430, + 458, + 440 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 430, + 458, + 440 + ], + "spans": [ + { + "bbox": [ + 389, + 430, + 458, + 440 + ], + "type": "text", + "content": "Arindam Mandal" + } + ] + } + ], + "index": 85 + }, + { + "bbox": [ + 389, + 441, + 438, + 451 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 441, + 438, + 451 + ], + "spans": [ + { + "bbox": [ + 389, + 441, + 438, + 451 + ], + "type": "text", + "content": "Arpit Gupta" + } + ] + } + ], + "index": 86 + }, + { + "bbox": [ + 389, + 453, + 465, + 462 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 453, + 465, + 462 + ], + "spans": [ + { + "bbox": [ + 389, + 453, + 465, + 462 + ], + "type": "text", + "content": "Arsamkhan Pathan" + } + ] + } + ], + "index": 87 + }, + { + "bbox": [ + 389, + 463, + 454, + 474 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 463, + 454, + 474 + ], + "spans": [ + { + "bbox": [ + 389, + 463, + 454, + 474 + ], + "type": "text", + "content": "Arun Nagarajan" + } + ] + } + ], + "index": 88 + }, + { + "bbox": [ + 389, + 475, + 478, + 484 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 475, + 478, + 484 + ], + "spans": [ + { + "bbox": [ + 389, + 475, + 478, + 484 + ], + "type": "text", + "content": "Arushan Rajasekaram" + } + ] + } + ], + "index": 89 + }, + { + "bbox": [ + 389, + 485, + 473, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 485, + 473, + 495 + ], + "spans": [ + { + "bbox": [ + 389, + 485, + 473, + 495 + ], + "type": "text", + "content": "Arvind Sundararajan" + } + ] + } + ], + "index": 90 + }, + { + "bbox": [ + 389, + 496, + 457, + 505 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, 
+ 496, + 457, + 505 + ], + "spans": [ + { + "bbox": [ + 389, + 496, + 457, + 505 + ], + "type": "text", + "content": "Ashwin Ganesan" + } + ] + } + ], + "index": 91 + }, + { + "bbox": [ + 389, + 506, + 477, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 506, + 477, + 517 + ], + "spans": [ + { + "bbox": [ + 389, + 506, + 477, + 517 + ], + "type": "text", + "content": "Ashwin Swaminathan" + } + ] + } + ], + "index": 92 + }, + { + "bbox": [ + 389, + 517, + 482, + 527 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 517, + 482, + 527 + ], + "spans": [ + { + "bbox": [ + 389, + 517, + 482, + 527 + ], + "type": "text", + "content": "Athanasios Mouchtaris" + } + ] + } + ], + "index": 93 + }, + { + "bbox": [ + 389, + 529, + 465, + 539 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 529, + 465, + 539 + ], + "spans": [ + { + "bbox": [ + 389, + 529, + 465, + 539 + ], + "type": "text", + "content": "Audrey Champeau" + } + ] + } + ], + "index": 94 + }, + { + "bbox": [ + 389, + 540, + 428, + 550 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 540, + 428, + 550 + ], + "spans": [ + { + "bbox": [ + 389, + 540, + 428, + 550 + ], + "type": "text", + "content": "Avik Ray" + } + ] + } + ], + "index": 95 + }, + { + "bbox": [ + 389, + 551, + 447, + 560 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 551, + 447, + 560 + ], + "spans": [ + { + "bbox": [ + 389, + 551, + 447, + 560 + ], + "type": "text", + "content": "Ayush Jaiswal" + } + ] + } + ], + "index": 96 + }, + { + "bbox": [ + 389, + 562, + 448, + 571 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 562, + 448, + 571 + ], + "spans": [ + { + "bbox": [ + 389, + 562, + 448, + 571 + ], + "type": "text", + "content": "Ayush Sharma" + } + ] + } + ], + "index": 97 + }, + { + "bbox": [ + 389, + 572, + 446, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 572, + 446, + 582 + ], + "spans": [ + { + "bbox": [ + 389, + 572, + 446, + 582 + ], + "type": "text", + "content": "Bailey Keefer" + } + ] + } + ], + "index": 98 + }, + { + "bbox": [ + 389, + 583, + 480, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 583, + 480, + 594 + ], + "spans": [ + { + "bbox": [ + 389, + 583, + 480, + 594 + ], + "type": "text", + "content": "Balamurugan Muthiah" + } + ] + } + ], + "index": 99 + }, + { + "bbox": [ + 389, + 594, + 472, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 594, + 472, + 604 + ], + "spans": [ + { + "bbox": [ + 389, + 594, + 472, + 604 + ], + "type": "text", + "content": "Beatrix Leon-Millan" + } + ] + } + ], + "index": 100 + }, + { + "bbox": [ + 389, + 605, + 447, + 615 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 605, + 447, + 615 + ], + "spans": [ + { + "bbox": [ + 389, + 605, + 447, + 615 + ], + "type": "text", + "content": "Ben Koopman" + } + ] + } + ], + "index": 101 + }, + { + "bbox": [ + 389, + 616, + 417, + 625 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 616, + 417, + 625 + ], + "spans": [ + { + "bbox": [ + 389, + 616, + 417, + 625 + ], + "type": "text", + "content": "Ben Li" + } + ] + } + ], + "index": 102 + }, + { + "bbox": [ + 389, + 627, + 455, + 637 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 627, + 455, + 637 + ], + "spans": [ + { + "bbox": [ + 389, + 627, + 455, + 637 + ], + "type": "text", + "content": 
"Benjamin Biggs" + } + ] + } + ], + "index": 103 + }, + { + "bbox": [ + 389, + 638, + 444, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 638, + 444, + 647 + ], + "spans": [ + { + "bbox": [ + 389, + 638, + 444, + 647 + ], + "type": "text", + "content": "Benjamin Ott" + } + ] + } + ], + "index": 104 + }, + { + "bbox": [ + 389, + 649, + 460, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 649, + 460, + 658 + ], + "spans": [ + { + "bbox": [ + 389, + 649, + 460, + 658 + ], + "type": "text", + "content": "Bhanu Vinzamuri" + } + ] + } + ], + "index": 105 + }, + { + "bbox": [ + 389, + 659, + 465, + 669 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 659, + 465, + 669 + ], + "spans": [ + { + "bbox": [ + 389, + 659, + 465, + 669 + ], + "type": "text", + "content": "Bharath Venkatesh" + } + ] + } + ], + "index": 106 + }, + { + "bbox": [ + 389, + 670, + 457, + 680 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 670, + 457, + 680 + ], + "spans": [ + { + "bbox": [ + 389, + 670, + 457, + 680 + ], + "type": "text", + "content": "Bhavana Ganesh" + } + ] + } + ], + "index": 107 + }, + { + "bbox": [ + 389, + 681, + 454, + 690 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 681, + 454, + 690 + ], + "spans": [ + { + "bbox": [ + 389, + 681, + 454, + 690 + ], + "type": "text", + "content": "Bhoomit Vasani" + } + ] + } + ], + "index": 108 + }, + { + "bbox": [ + 389, + 693, + 432, + 702 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 693, + 432, + 702 + ], + "spans": [ + { + "bbox": [ + 389, + 693, + 432, + 702 + ], + "type": "text", + "content": "Bill Byrne" + } + ] + } + ], + "index": 109 + }, + { + "bbox": [ + 389, + 703, + 424, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 703, + 424, + 712 + ], + "spans": [ + { + "bbox": [ + 389, + 703, + 424, + 712 + ], + "type": "text", + "content": "Bill Hsu" + } + ] + } + ], + "index": 110 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "spans": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "text", + "content": "The Amazon Nova Family of Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 297, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 297, + 741, + 309, + 750 + ], + "type": "text", + "content": "43" + } + ] + } + ], + "index": 111 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 42 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 72, + 136, + 84 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 72, + 136, + 84 + ], + "spans": [ + { + "bbox": [ + 69, + 72, + 136, + 84 + ], + "type": "text", + "content": "Bincheng Wang" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 71, + 84, + 117, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 84, + 117, + 95 + ], + "spans": [ + { + "bbox": [ + 71, + 84, + 117, + 95 + ], + "type": "text", + "content": "Blake King" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 71, + 95, + 125, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 95, + 125, + 106 + ], + "spans": [ + { + "bbox": [ + 71, + 95, + 125, + 106 + ], + "type": "text", + "content": "Blazej Gorny" + } + ] + } + ], 
+ "index": 3 + }, + { + "bbox": [ + 71, + 106, + 106, + 117 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 106, + 106, + 117 + ], + "spans": [ + { + "bbox": [ + 71, + 106, + 106, + 117 + ], + "type": "text", + "content": "Bo Feng" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 71, + 117, + 111, + 127 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 117, + 111, + 127 + ], + "spans": [ + { + "bbox": [ + 71, + 117, + 111, + 127 + ], + "type": "text", + "content": "Bo Zheng" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 71, + 128, + 141, + 137 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 128, + 141, + 137 + ], + "spans": [ + { + "bbox": [ + 71, + 128, + 141, + 137 + ], + "type": "text", + "content": "Bodhisattwa Paul" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 71, + 139, + 114, + 148 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 139, + 114, + 148 + ], + "spans": [ + { + "bbox": [ + 71, + 139, + 114, + 148 + ], + "type": "text", + "content": "Bofan Sun" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 71, + 149, + 119, + 159 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 149, + 119, + 159 + ], + "spans": [ + { + "bbox": [ + 71, + 149, + 119, + 159 + ], + "type": "text", + "content": "Bofeng Luo" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 71, + 160, + 123, + 170 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 160, + 123, + 170 + ], + "spans": [ + { + "bbox": [ + 71, + 160, + 123, + 170 + ], + "type": "text", + "content": "Bowen Chen" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 71, + 171, + 116, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 171, + 116, + 180 + ], + "spans": [ + { + "bbox": [ + 71, + 171, + 116, + 180 + ], + "type": "text", + "content": "Bowen Xie" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 71, + 182, + 106, + 193 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 182, + 106, + 193 + ], + "spans": [ + { + "bbox": [ + 71, + 182, + 106, + 193 + ], + "type": "text", + "content": "Boya Yu" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 71, + 194, + 130, + 203 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 194, + 130, + 203 + ], + "spans": [ + { + "bbox": [ + 71, + 194, + 130, + 203 + ], + "type": "text", + "content": "Brendan Jugan" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 71, + 204, + 123, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 204, + 123, + 213 + ], + "spans": [ + { + "bbox": [ + 71, + 204, + 123, + 213 + ], + "type": "text", + "content": "Brett Panosh" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 71, + 215, + 125, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 215, + 125, + 224 + ], + "spans": [ + { + "bbox": [ + 71, + 215, + 125, + 224 + ], + "type": "text", + "content": "Brian Collins" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 71, + 226, + 138, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 226, + 138, + 236 + ], + "spans": [ + { + "bbox": [ + 71, + 226, + 138, + 236 + ], + "type": "text", + "content": "Brian Thompson" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 71, + 237, + 123, + 247 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 237, + 123, + 247 + ], + "spans": [ + { + "bbox": [ + 71, + 237, + 
123, + 247 + ], + "type": "text", + "content": "Can Karakus" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 71, + 248, + 104, + 257 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 248, + 104, + 257 + ], + "spans": [ + { + "bbox": [ + 71, + 248, + 104, + 257 + ], + "type": "text", + "content": "Can Liu" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 71, + 258, + 134, + 268 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 258, + 134, + 268 + ], + "spans": [ + { + "bbox": [ + 71, + 258, + 134, + 268 + ], + "type": "text", + "content": "Carl Lambrecht" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 71, + 270, + 110, + 279 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 270, + 110, + 279 + ], + "spans": [ + { + "bbox": [ + 71, + 270, + 110, + 279 + ], + "type": "text", + "content": "Carly Lin" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 71, + 281, + 129, + 291 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 281, + 129, + 291 + ], + "spans": [ + { + "bbox": [ + 71, + 281, + 129, + 291 + ], + "type": "text", + "content": "Carolyn Wang" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 71, + 292, + 119, + 301 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 292, + 119, + 301 + ], + "spans": [ + { + "bbox": [ + 71, + 292, + 119, + 301 + ], + "type": "text", + "content": "Carrie Yuan" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 71, + 303, + 124, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 303, + 124, + 312 + ], + "spans": [ + { + "bbox": [ + 71, + 303, + 124, + 312 + ], + "type": "text", + "content": "Casey Loyda" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 71, + 313, + 136, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 313, + 136, + 323 + ], + "spans": [ + { + "bbox": [ + 71, + 313, + 136, + 323 + ], + "type": "text", + "content": "Cezary Walczak" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 71, + 324, + 148, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 324, + 148, + 335 + ], + "spans": [ + { + "bbox": [ + 71, + 324, + 148, + 335 + ], + "type": "text", + "content": "Chalapathi Choppa" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 71, + 335, + 170, + 345 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 335, + 170, + 345 + ], + "spans": [ + { + "bbox": [ + 71, + 335, + 170, + 345 + ], + "type": "text", + "content": "Chandana Satya Prakash" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 71, + 346, + 167, + 356 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 346, + 167, + 356 + ], + "spans": [ + { + "bbox": [ + 71, + 346, + 167, + 356 + ], + "type": "text", + "content": "Chankrisna Richy Meas" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 71, + 357, + 124, + 367 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 357, + 124, + 367 + ], + "spans": [ + { + "bbox": [ + 71, + 357, + 124, + 367 + ], + "type": "text", + "content": "Charith Peris" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 71, + 368, + 137, + 377 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 368, + 137, + 377 + ], + "spans": [ + { + "bbox": [ + 71, + 368, + 137, + 377 + ], + "type": "text", + "content": "Charles Recaido" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 71, + 379, + 115, + 388 + ], + "type": 
"text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 379, + 115, + 388 + ], + "spans": [ + { + "bbox": [ + 71, + 379, + 115, + 388 + ], + "type": "text", + "content": "Charlie Xu" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 71, + 389, + 131, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 389, + 131, + 399 + ], + "spans": [ + { + "bbox": [ + 71, + 389, + 131, + 399 + ], + "type": "text", + "content": "Charul Sharma" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 71, + 400, + 127, + 410 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 400, + 127, + 410 + ], + "spans": [ + { + "bbox": [ + 71, + 400, + 127, + 410 + ], + "type": "text", + "content": "Chase Kernan" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 71, + 411, + 151, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 411, + 151, + 422 + ], + "spans": [ + { + "bbox": [ + 71, + 411, + 151, + 422 + ], + "type": "text", + "content": "Chayut Thanapirom" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 71, + 422, + 125, + 432 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 422, + 125, + 432 + ], + "spans": [ + { + "bbox": [ + 71, + 422, + 125, + 432 + ], + "type": "text", + "content": "Chengwei Su" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 71, + 434, + 121, + 442 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 434, + 121, + 442 + ], + "spans": [ + { + "bbox": [ + 71, + 434, + 121, + 442 + ], + "type": "text", + "content": "Chenhao Xu" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 71, + 445, + 124, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 445, + 124, + 453 + ], + "spans": [ + { + "bbox": [ + 71, + 445, + 124, + 453 + ], + "type": "text", + "content": "Chenhao Yin" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 71, + 455, + 118, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 455, + 118, + 464 + ], + "spans": [ + { + "bbox": [ + 71, + 455, + 118, + 464 + ], + "type": "text", + "content": "Chentao Ye" + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 71, + 466, + 129, + 476 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 466, + 129, + 476 + ], + "spans": [ + { + "bbox": [ + 71, + 466, + 129, + 476 + ], + "type": "text", + "content": "Chenyang Tao" + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 71, + 477, + 165, + 487 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 477, + 165, + 487 + ], + "spans": [ + { + "bbox": [ + 71, + 477, + 165, + 487 + ], + "type": "text", + "content": "Chethan Parameshwara" + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 71, + 487, + 144, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 487, + 144, + 498 + ], + "spans": [ + { + "bbox": [ + 71, + 487, + 144, + 498 + ], + "type": "text", + "content": "Ching-Yun Chang" + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 71, + 499, + 109, + 509 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 499, + 109, + 509 + ], + "spans": [ + { + "bbox": [ + 71, + 499, + 109, + 509 + ], + "type": "text", + "content": "Chong Li" + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 71, + 510, + 121, + 519 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 510, + 121, + 519 + ], + "spans": [ + { + "bbox": [ + 71, + 510, + 121, + 519 + ], + "type": "text", + "content": 
"Chris Hench" + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 71, + 521, + 114, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 521, + 114, + 529 + ], + "spans": [ + { + "bbox": [ + 71, + 521, + 114, + 529 + ], + "type": "text", + "content": "Chris Tran" + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 71, + 532, + 145, + 542 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 532, + 145, + 542 + ], + "spans": [ + { + "bbox": [ + 71, + 532, + 145, + 542 + ], + "type": "text", + "content": "Christophe Dupuy" + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 71, + 543, + 144, + 553 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 543, + 144, + 553 + ], + "spans": [ + { + "bbox": [ + 71, + 543, + 144, + 553 + ], + "type": "text", + "content": "Christopher Davis" + } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 71, + 554, + 156, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 554, + 156, + 563 + ], + "spans": [ + { + "bbox": [ + 71, + 554, + 156, + 563 + ], + "type": "text", + "content": "Christopher DiPersio" + } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 71, + 564, + 186, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 564, + 186, + 574 + ], + "spans": [ + { + "bbox": [ + 71, + 564, + 186, + 574 + ], + "type": "text", + "content": "Christos Christodoulopoulos" + } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 71, + 575, + 112, + 585 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 575, + 112, + 585 + ], + "spans": [ + { + "bbox": [ + 71, + 575, + 112, + 585 + ], + "type": "text", + "content": "Christy Li" + } + ] + } + ], + "index": 47 + }, + { + "bbox": [ + 71, + 586, + 116, + 595 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 586, + 116, + 595 + ], + "spans": [ + { + "bbox": [ + 71, + 586, + 116, + 595 + ], + "type": "text", + "content": "Chun Chen" + } + ] + } + ], + "index": 48 + }, + { + "bbox": [ + 71, + 597, + 147, + 606 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 597, + 147, + 606 + ], + "spans": [ + { + "bbox": [ + 71, + 597, + 147, + 606 + ], + "type": "text", + "content": "Claudio Delli Bovi" + } + ] + } + ], + "index": 49 + }, + { + "bbox": [ + 71, + 608, + 135, + 619 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 608, + 135, + 619 + ], + "spans": [ + { + "bbox": [ + 71, + 608, + 135, + 619 + ], + "type": "text", + "content": "Clement Chung" + } + ] + } + ], + "index": 50 + }, + { + "bbox": [ + 71, + 619, + 128, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 619, + 128, + 628 + ], + "spans": [ + { + "bbox": [ + 71, + 619, + 128, + 628 + ], + "type": "text", + "content": "Cole Hawkins" + } + ] + } + ], + "index": 51 + }, + { + "bbox": [ + 71, + 630, + 129, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 630, + 129, + 639 + ], + "spans": [ + { + "bbox": [ + 71, + 630, + 129, + 639 + ], + "type": "text", + "content": "Connor Harris" + } + ] + } + ], + "index": 52 + }, + { + "bbox": [ + 71, + 641, + 124, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 641, + 124, + 651 + ], + "spans": [ + { + "bbox": [ + 71, + 641, + 124, + 651 + ], + "type": "text", + "content": "Corey Ropell" + } + ] + } + ], + "index": 53 + }, + { + "bbox": [ + 71, + 651, + 118, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + 
"bbox": [ + 71, + 651, + 118, + 662 + ], + "spans": [ + { + "bbox": [ + 71, + 651, + 118, + 662 + ], + "type": "text", + "content": "Cynthia He" + } + ] + } + ], + "index": 54 + }, + { + "bbox": [ + 71, + 662, + 102, + 671 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 662, + 102, + 671 + ], + "spans": [ + { + "bbox": [ + 71, + 662, + 102, + 671 + ], + "type": "text", + "content": "DK Joo" + } + ] + } + ], + "index": 55 + }, + { + "bbox": [ + 71, + 673, + 137, + 684 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 673, + 137, + 684 + ], + "spans": [ + { + "bbox": [ + 71, + 673, + 137, + 684 + ], + "type": "text", + "content": "Dae Yon Hwang" + } + ] + } + ], + "index": 56 + }, + { + "bbox": [ + 71, + 685, + 116, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 685, + 116, + 693 + ], + "spans": [ + { + "bbox": [ + 71, + 685, + 116, + 693 + ], + "type": "text", + "content": "Dan Rosen" + } + ] + } + ], + "index": 57 + }, + { + "bbox": [ + 71, + 695, + 127, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 695, + 127, + 704 + ], + "spans": [ + { + "bbox": [ + 71, + 695, + 127, + 704 + ], + "type": "text", + "content": "Daniel Elkind" + } + ] + } + ], + "index": 58 + }, + { + "bbox": [ + 71, + 706, + 129, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 706, + 129, + 715 + ], + "spans": [ + { + "bbox": [ + 71, + 706, + 129, + 715 + ], + "type": "text", + "content": "Daniel Pressel" + } + ] + } + ], + "index": 59 + }, + { + "bbox": [ + 230, + 72, + 286, + 84 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 72, + 286, + 84 + ], + "spans": [ + { + "bbox": [ + 230, + 72, + 286, + 84 + ], + "type": "text", + "content": "Daniel Zhang" + } + ] + } + ], + "index": 60 + }, + { + "bbox": [ + 230, + 84, + 299, + 94 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 84, + 299, + 94 + ], + "spans": [ + { + "bbox": [ + 230, + 84, + 299, + 94 + ], + "type": "text", + "content": "Danielle Kimball" + } + ] + } + ], + "index": 61 + }, + { + "bbox": [ + 230, + 95, + 290, + 104 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 95, + 290, + 104 + ], + "spans": [ + { + "bbox": [ + 230, + 95, + 290, + 104 + ], + "type": "text", + "content": "Daniil Sorokin" + } + ] + } + ], + "index": 62 + }, + { + "bbox": [ + 230, + 106, + 286, + 115 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 106, + 286, + 115 + ], + "spans": [ + { + "bbox": [ + 230, + 106, + 286, + 115 + ], + "type": "text", + "content": "Dave Goodell" + } + ] + } + ], + "index": 63 + }, + { + "bbox": [ + 230, + 117, + 293, + 125 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 117, + 293, + 125 + ], + "spans": [ + { + "bbox": [ + 230, + 117, + 293, + 125 + ], + "type": "text", + "content": "Davide Modolo" + } + ] + } + ], + "index": 64 + }, + { + "bbox": [ + 230, + 127, + 276, + 137 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 127, + 276, + 137 + ], + "spans": [ + { + "bbox": [ + 230, + 127, + 276, + 137 + ], + "type": "text", + "content": "Dawei Zhu" + } + ] + } + ], + "index": 65 + }, + { + "bbox": [ + 230, + 138, + 298, + 148 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 138, + 298, + 148 + ], + "spans": [ + { + "bbox": [ + 230, + 138, + 298, + 148 + ], + "type": "text", + "content": "Deepikaa Suresh" + } + ] + } + ], + "index": 
66 + }, + { + "bbox": [ + 230, + 149, + 285, + 159 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 149, + 285, + 159 + ], + "spans": [ + { + "bbox": [ + 230, + 149, + 285, + 159 + ], + "type": "text", + "content": "Deepti Raga" + } + ] + } + ], + "index": 67 + }, + { + "bbox": [ + 230, + 160, + 298, + 170 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 160, + 298, + 170 + ], + "spans": [ + { + "bbox": [ + 230, + 160, + 298, + 170 + ], + "type": "text", + "content": "Denis Filimonov" + } + ] + } + ], + "index": 68 + }, + { + "bbox": [ + 230, + 171, + 296, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 171, + 296, + 180 + ], + "spans": [ + { + "bbox": [ + 230, + 171, + 296, + 180 + ], + "type": "text", + "content": "Denis Foo Kune" + } + ] + } + ], + "index": 69 + }, + { + "bbox": [ + 230, + 182, + 345, + 193 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 182, + 345, + 193 + ], + "spans": [ + { + "bbox": [ + 230, + 182, + 345, + 193 + ], + "type": "text", + "content": "Denis Romasanta Rodriguez" + } + ] + } + ], + "index": 70 + }, + { + "bbox": [ + 230, + 194, + 317, + 203 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 194, + 317, + 203 + ], + "spans": [ + { + "bbox": [ + 230, + 194, + 317, + 203 + ], + "type": "text", + "content": "Devamanyu Hazarika" + } + ] + } + ], + "index": 71 + }, + { + "bbox": [ + 230, + 204, + 295, + 214 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 204, + 295, + 214 + ], + "spans": [ + { + "bbox": [ + 230, + 204, + 295, + 214 + ], + "type": "text", + "content": "Dhananjay Ram" + } + ] + } + ], + "index": 72 + }, + { + "bbox": [ + 230, + 215, + 290, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 215, + 290, + 224 + ], + "spans": [ + { + "bbox": [ + 230, + 215, + 290, + 224 + ], + "type": "text", + "content": "Dhawal Parkar" + } + ] + } + ], + "index": 73 + }, + { + "bbox": [ + 230, + 226, + 284, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 226, + 284, + 236 + ], + "spans": [ + { + "bbox": [ + 230, + 226, + 284, + 236 + ], + "type": "text", + "content": "Dhawal Patel" + } + ] + } + ], + "index": 74 + }, + { + "bbox": [ + 230, + 237, + 290, + 246 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 237, + 290, + 246 + ], + "spans": [ + { + "bbox": [ + 230, + 237, + 290, + 246 + ], + "type": "text", + "content": "Dhwanil Desai" + } + ] + } + ], + "index": 75 + }, + { + "bbox": [ + 230, + 248, + 314, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 248, + 314, + 258 + ], + "spans": [ + { + "bbox": [ + 230, + 248, + 314, + 258 + ], + "type": "text", + "content": "Dinesh Singh Rajput" + } + ] + } + ], + "index": 76 + }, + { + "bbox": [ + 230, + 259, + 275, + 268 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 259, + 275, + 268 + ], + "spans": [ + { + "bbox": [ + 230, + 259, + 275, + 268 + ], + "type": "text", + "content": "Disha Sule" + } + ] + } + ], + "index": 77 + }, + { + "bbox": [ + 230, + 270, + 291, + 280 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 270, + 291, + 280 + ], + "spans": [ + { + "bbox": [ + 230, + 270, + 291, + 280 + ], + "type": "text", + "content": "Diwakar Singh" + } + ] + } + ], + "index": 78 + }, + { + "bbox": [ + 230, + 281, + 293, + 290 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": 
[ + 230, + 281, + 293, + 290 + ], + "spans": [ + { + "bbox": [ + 230, + 281, + 293, + 290 + ], + "type": "text", + "content": "Dmitriy Genzel" + } + ] + } + ], + "index": 79 + }, + { + "bbox": [ + 230, + 292, + 303, + 301 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 292, + 303, + 301 + ], + "spans": [ + { + "bbox": [ + 230, + 292, + 303, + 301 + ], + "type": "text", + "content": "Dolly Goldenberg" + } + ] + } + ], + "index": 80 + }, + { + "bbox": [ + 230, + 303, + 275, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 303, + 275, + 312 + ], + "spans": [ + { + "bbox": [ + 230, + 303, + 275, + 312 + ], + "type": "text", + "content": "Dongyi He" + } + ] + } + ], + "index": 81 + }, + { + "bbox": [ + 230, + 313, + 296, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 313, + 296, + 323 + ], + "spans": [ + { + "bbox": [ + 230, + 313, + 296, + 323 + ], + "type": "text", + "content": "Dumitru Hanciu" + } + ] + } + ], + "index": 82 + }, + { + "bbox": [ + 230, + 324, + 298, + 334 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 324, + 298, + 334 + ], + "spans": [ + { + "bbox": [ + 230, + 324, + 298, + 334 + ], + "type": "text", + "content": "Dushan Tharmal" + } + ] + } + ], + "index": 83 + }, + { + "bbox": [ + 230, + 335, + 311, + 345 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 335, + 311, + 345 + ], + "spans": [ + { + "bbox": [ + 230, + 335, + 311, + 345 + ], + "type": "text", + "content": "Dzmitry Siankovich" + } + ] + } + ], + "index": 84 + }, + { + "bbox": [ + 230, + 346, + 279, + 355 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 346, + 279, + 355 + ], + "spans": [ + { + "bbox": [ + 230, + 346, + 279, + 355 + ], + "type": "text", + "content": "Edi Cikovic" + } + ] + } + ], + "index": 85 + }, + { + "bbox": [ + 230, + 357, + 296, + 366 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 357, + 296, + 366 + ], + "spans": [ + { + "bbox": [ + 230, + 357, + 296, + 366 + ], + "type": "text", + "content": "Edwin Abraham" + } + ] + } + ], + "index": 86 + }, + { + "bbox": [ + 230, + 368, + 286, + 377 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 368, + 286, + 377 + ], + "spans": [ + { + "bbox": [ + 230, + 368, + 286, + 377 + ], + "type": "text", + "content": "Ekraam Sabir" + } + ] + } + ], + "index": 87 + }, + { + "bbox": [ + 230, + 379, + 282, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 379, + 282, + 388 + ], + "spans": [ + { + "bbox": [ + 230, + 379, + 282, + 388 + ], + "type": "text", + "content": "Elliott Olson" + } + ] + } + ], + "index": 88 + }, + { + "bbox": [ + 230, + 389, + 291, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 389, + 291, + 399 + ], + "spans": [ + { + "bbox": [ + 230, + 389, + 291, + 399 + ], + "type": "text", + "content": "Emmett Steven" + } + ] + } + ], + "index": 89 + }, + { + "bbox": [ + 230, + 400, + 277, + 410 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 400, + 277, + 410 + ], + "spans": [ + { + "bbox": [ + 230, + 400, + 277, + 410 + ], + "type": "text", + "content": "Emre Barut" + } + ] + } + ], + "index": 90 + }, + { + "bbox": [ + 230, + 411, + 282, + 421 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 411, + 282, + 421 + ], + "spans": [ + { + "bbox": [ + 230, + 411, + 282, + 421 + ], + "type": "text", + "content": "Eric 
Jackson" + } + ] + } + ], + "index": 91 + }, + { + "bbox": [ + 230, + 422, + 270, + 432 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 422, + 270, + 432 + ], + "spans": [ + { + "bbox": [ + 230, + 422, + 270, + 432 + ], + "type": "text", + "content": "Ethan Wu" + } + ] + } + ], + "index": 92 + }, + { + "bbox": [ + 230, + 434, + 282, + 444 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 434, + 282, + 444 + ], + "spans": [ + { + "bbox": [ + 230, + 434, + 282, + 444 + ], + "type": "text", + "content": "Evelyn Chen" + } + ] + } + ], + "index": 93 + }, + { + "bbox": [ + 230, + 445, + 314, + 454 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 445, + 314, + 454 + ], + "spans": [ + { + "bbox": [ + 230, + 445, + 314, + 454 + ], + "type": "text", + "content": "Ezhilan Mahalingam" + } + ] + } + ], + "index": 94 + }, + { + "bbox": [ + 230, + 455, + 308, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 455, + 308, + 464 + ], + "spans": [ + { + "bbox": [ + 230, + 455, + 308, + 464 + ], + "type": "text", + "content": "Fabian Triefenbach" + } + ] + } + ], + "index": 95 + }, + { + "bbox": [ + 230, + 466, + 269, + 476 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 466, + 269, + 476 + ], + "spans": [ + { + "bbox": [ + 230, + 466, + 269, + 476 + ], + "type": "text", + "content": "Fan Yang" + } + ] + } + ], + "index": 96 + }, + { + "bbox": [ + 230, + 477, + 277, + 487 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 477, + 277, + 487 + ], + "spans": [ + { + "bbox": [ + 230, + 477, + 277, + 487 + ], + "type": "text", + "content": "Fangyu Liu" + } + ] + } + ], + "index": 97 + }, + { + "bbox": [ + 230, + 488, + 269, + 497 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 488, + 269, + 497 + ], + "spans": [ + { + "bbox": [ + 230, + 488, + 269, + 497 + ], + "type": "text", + "content": "Fanzi Wu" + } + ] + } + ], + "index": 98 + }, + { + "bbox": [ + 230, + 498, + 290, + 508 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 498, + 290, + 508 + ], + "spans": [ + { + "bbox": [ + 230, + 498, + 290, + 508 + ], + "type": "text", + "content": "Faraz Tavakoli" + } + ] + } + ], + "index": 99 + }, + { + "bbox": [ + 230, + 510, + 307, + 519 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 510, + 307, + 519 + ], + "spans": [ + { + "bbox": [ + 230, + 510, + 307, + 519 + ], + "type": "text", + "content": "Farhad Khozeimeh" + } + ] + } + ], + "index": 100 + }, + { + "bbox": [ + 230, + 521, + 280, + 531 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 521, + 280, + 531 + ], + "spans": [ + { + "bbox": [ + 230, + 521, + 280, + 531 + ], + "type": "text", + "content": "Feiyang Niu" + } + ] + } + ], + "index": 101 + }, + { + "bbox": [ + 230, + 532, + 281, + 541 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 532, + 281, + 541 + ], + "spans": [ + { + "bbox": [ + 230, + 532, + 281, + 541 + ], + "type": "text", + "content": "Felix Hieber" + } + ] + } + ], + "index": 102 + }, + { + "bbox": [ + 230, + 543, + 262, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 543, + 262, + 552 + ], + "spans": [ + { + "bbox": [ + 230, + 543, + 262, + 552 + ], + "type": "text", + "content": "Feng Li" + } + ] + } + ], + "index": 103 + }, + { + "bbox": [ + 230, + 554, + 276, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + 
"bbox": [ + 230, + 554, + 276, + 563 + ], + "spans": [ + { + "bbox": [ + 230, + 554, + 276, + 563 + ], + "type": "text", + "content": "First Elbey" + } + ] + } + ], + "index": 104 + }, + { + "bbox": [ + 230, + 564, + 286, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 564, + 286, + 574 + ], + "spans": [ + { + "bbox": [ + 230, + 564, + 286, + 574 + ], + "type": "text", + "content": "Florian Krebs" + } + ] + } + ], + "index": 105 + }, + { + "bbox": [ + 230, + 575, + 286, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 575, + 286, + 586 + ], + "spans": [ + { + "bbox": [ + 230, + 575, + 286, + 586 + ], + "type": "text", + "content": "Florian Saupe" + } + ] + } + ], + "index": 106 + }, + { + "bbox": [ + 230, + 586, + 300, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 586, + 300, + 597 + ], + "spans": [ + { + "bbox": [ + 230, + 586, + 300, + 597 + ], + "type": "text", + "content": "Florian Sprunken" + } + ] + } + ], + "index": 107 + }, + { + "bbox": [ + 230, + 597, + 271, + 606 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 597, + 271, + 606 + ], + "spans": [ + { + "bbox": [ + 230, + 597, + 271, + 606 + ], + "type": "text", + "content": "Frank Fan" + } + ] + } + ], + "index": 108 + }, + { + "bbox": [ + 230, + 608, + 283, + 618 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 608, + 283, + 618 + ], + "spans": [ + { + "bbox": [ + 230, + 608, + 283, + 618 + ], + "type": "text", + "content": "Furqan Khan" + } + ] + } + ], + "index": 109 + }, + { + "bbox": [ + 230, + 619, + 320, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 619, + 320, + 628 + ], + "spans": [ + { + "bbox": [ + 230, + 619, + 320, + 628 + ], + "type": "text", + "content": "Gabriela De Vincenzo" + } + ] + } + ], + "index": 110 + }, + { + "bbox": [ + 230, + 629, + 300, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 629, + 300, + 640 + ], + "spans": [ + { + "bbox": [ + 230, + 629, + 300, + 640 + ], + "type": "text", + "content": "Gagandeep Kang" + } + ] + } + ], + "index": 111 + }, + { + "bbox": [ + 230, + 641, + 282, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 641, + 282, + 651 + ], + "spans": [ + { + "bbox": [ + 230, + 641, + 282, + 651 + ], + "type": "text", + "content": "George Ding" + } + ] + } + ], + "index": 112 + }, + { + "bbox": [ + 230, + 651, + 274, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 651, + 274, + 662 + ], + "spans": [ + { + "bbox": [ + 230, + 651, + 274, + 662 + ], + "type": "text", + "content": "George He" + } + ] + } + ], + "index": 113 + }, + { + "bbox": [ + 230, + 662, + 288, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 662, + 288, + 673 + ], + "spans": [ + { + "bbox": [ + 230, + 662, + 288, + 673 + ], + "type": "text", + "content": "George Yeung" + } + ] + } + ], + "index": 114 + }, + { + "bbox": [ + 230, + 673, + 301, + 683 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 673, + 301, + 683 + ], + "spans": [ + { + "bbox": [ + 230, + 673, + 301, + 683 + ], + "type": "text", + "content": "Ghada Qaddoumi" + } + ] + } + ], + "index": 115 + }, + { + "bbox": [ + 230, + 685, + 324, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 685, + 324, + 693 + ], + "spans": [ + { + "bbox": [ + 230, + 685, + 324, + 693 + ], + "type": "text", + 
"content": "Giannis Karamanolakis" + } + ] + } + ], + "index": 116 + }, + { + "bbox": [ + 230, + 695, + 307, + 705 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 695, + 307, + 705 + ], + "spans": [ + { + "bbox": [ + 230, + 695, + 307, + 705 + ], + "type": "text", + "content": "Goeric Huybrechts" + } + ] + } + ], + "index": 117 + }, + { + "bbox": [ + 230, + 706, + 291, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 706, + 291, + 715 + ], + "spans": [ + { + "bbox": [ + 230, + 706, + 291, + 715 + ], + "type": "text", + "content": "Gokul Maddali" + } + ] + } + ], + "index": 118 + }, + { + "bbox": [ + 389, + 72, + 457, + 83 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 72, + 457, + 83 + ], + "spans": [ + { + "bbox": [ + 389, + 72, + 457, + 83 + ], + "type": "text", + "content": "Gonzalo Iglesias" + } + ] + } + ], + "index": 119 + }, + { + "bbox": [ + 389, + 84, + 461, + 94 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 84, + 461, + 94 + ], + "spans": [ + { + "bbox": [ + 389, + 84, + 461, + 94 + ], + "type": "text", + "content": "Gordon McShane" + } + ] + } + ], + "index": 120 + }, + { + "bbox": [ + 389, + 95, + 441, + 104 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 95, + 441, + 104 + ], + "spans": [ + { + "bbox": [ + 389, + 95, + 441, + 104 + ], + "type": "text", + "content": "Gozde Sahin" + } + ] + } + ], + "index": 121 + }, + { + "bbox": [ + 389, + 106, + 456, + 116 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 106, + 456, + 116 + ], + "spans": [ + { + "bbox": [ + 389, + 106, + 456, + 116 + ], + "type": "text", + "content": "Guangtai Huang" + } + ] + } + ], + "index": 122 + }, + { + "bbox": [ + 389, + 117, + 458, + 127 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 117, + 458, + 127 + ], + "spans": [ + { + "bbox": [ + 389, + 117, + 458, + 127 + ], + "type": "text", + "content": "Gukyeong Kwon" + } + ] + } + ], + "index": 123 + }, + { + "bbox": [ + 389, + 128, + 479, + 138 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 128, + 479, + 138 + ], + "spans": [ + { + "bbox": [ + 389, + 128, + 479, + 138 + ], + "type": "text", + "content": "Gunnar A. 
Sigurdsson" + } + ] + } + ], + "index": 124 + }, + { + "bbox": [ + 389, + 139, + 459, + 148 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 139, + 459, + 148 + ], + "spans": [ + { + "bbox": [ + 389, + 139, + 459, + 148 + ], + "type": "text", + "content": "Gurpreet Chadha" + } + ] + } + ], + "index": 125 + }, + { + "bbox": [ + 389, + 149, + 452, + 160 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 149, + 452, + 160 + ], + "spans": [ + { + "bbox": [ + 389, + 149, + 452, + 160 + ], + "type": "text", + "content": "Gururaj Kosuru" + } + ] + } + ], + "index": 126 + }, + { + "bbox": [ + 389, + 161, + 462, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 161, + 462, + 171 + ], + "spans": [ + { + "bbox": [ + 389, + 161, + 462, + 171 + ], + "type": "text", + "content": "Hagen Fuerstenau" + } + ] + } + ], + "index": 127 + }, + { + "bbox": [ + 389, + 171, + 426, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 171, + 426, + 180 + ], + "spans": [ + { + "bbox": [ + 389, + 171, + 426, + 180 + ], + "type": "text", + "content": "Hah Hah" + } + ] + } + ], + "index": 128 + }, + { + "bbox": [ + 389, + 182, + 446, + 192 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 182, + 446, + 192 + ], + "spans": [ + { + "bbox": [ + 389, + 182, + 446, + 192 + ], + "type": "text", + "content": "Haja Maideen" + } + ] + } + ], + "index": 129 + }, + { + "bbox": [ + 389, + 194, + 464, + 203 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 194, + 464, + 203 + ], + "spans": [ + { + "bbox": [ + 389, + 194, + 464, + 203 + ], + "type": "text", + "content": "Hajime Hosokawa" + } + ] + } + ], + "index": 130 + }, + { + "bbox": [ + 389, + 204, + 424, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 204, + 424, + 213 + ], + "spans": [ + { + "bbox": [ + 389, + 204, + 424, + 213 + ], + "type": "text", + "content": "Han Liu" + } + ] + } + ], + "index": 131 + }, + { + "bbox": [ + 389, + 215, + 443, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 215, + 443, + 224 + ], + "spans": [ + { + "bbox": [ + 389, + 215, + 443, + 224 + ], + "type": "text", + "content": "Han-Kai Hsu" + } + ] + } + ], + "index": 132 + }, + { + "bbox": [ + 389, + 226, + 437, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 226, + 437, + 236 + ], + "spans": [ + { + "bbox": [ + 389, + 226, + 437, + 236 + ], + "type": "text", + "content": "Hann Wang" + } + ] + } + ], + "index": 133 + }, + { + "bbox": [ + 389, + 237, + 418, + 246 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 237, + 418, + 246 + ], + "spans": [ + { + "bbox": [ + 389, + 237, + 418, + 246 + ], + "type": "text", + "content": "Hao Li" + } + ] + } + ], + "index": 134 + }, + { + "bbox": [ + 389, + 248, + 430, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 248, + 430, + 258 + ], + "spans": [ + { + "bbox": [ + 389, + 248, + 430, + 258 + ], + "type": "text", + "content": "Hao Yang" + } + ] + } + ], + "index": 135 + }, + { + "bbox": [ + 389, + 259, + 443, + 269 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 259, + 443, + 269 + ], + "spans": [ + { + "bbox": [ + 389, + 259, + 443, + 269 + ], + "type": "text", + "content": "Haofeng Zhu" + } + ] + } + ], + "index": 136 + }, + { + "bbox": [ + 389, + 270, + 447, + 280 + ], + "type": "text", + "angle": 0, + "lines": [ + { + 
"bbox": [ + 389, + 270, + 447, + 280 + ], + "spans": [ + { + "bbox": [ + 389, + 270, + 447, + 280 + ], + "type": "text", + "content": "Haozheng Fan" + } + ] + } + ], + "index": 137 + }, + { + "bbox": [ + 389, + 281, + 448, + 291 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 281, + 448, + 291 + ], + "spans": [ + { + "bbox": [ + 389, + 281, + 448, + 291 + ], + "type": "text", + "content": "Harman Singh" + } + ] + } + ], + "index": 138 + }, + { + "bbox": [ + 389, + 292, + 488, + 301 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 292, + 488, + 301 + ], + "spans": [ + { + "bbox": [ + 389, + 292, + 488, + 301 + ], + "type": "text", + "content": "Harshavardhan Kaluvala" + } + ] + } + ], + "index": 139 + }, + { + "bbox": [ + 389, + 303, + 447, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 303, + 447, + 312 + ], + "spans": [ + { + "bbox": [ + 389, + 303, + 447, + 312 + ], + "type": "text", + "content": "Hashim Saeed" + } + ] + } + ], + "index": 140 + }, + { + "bbox": [ + 389, + 313, + 419, + 322 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 313, + 419, + 322 + ], + "spans": [ + { + "bbox": [ + 389, + 313, + 419, + 322 + ], + "type": "text", + "content": "He Xie" + } + ] + } + ], + "index": 141 + }, + { + "bbox": [ + 389, + 324, + 439, + 334 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 324, + 439, + 334 + ], + "spans": [ + { + "bbox": [ + 389, + 324, + 439, + 334 + ], + "type": "text", + "content": "Helian Feng" + } + ] + } + ], + "index": 142 + }, + { + "bbox": [ + 389, + 335, + 425, + 345 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 335, + 425, + 345 + ], + "spans": [ + { + "bbox": [ + 389, + 335, + 425, + 345 + ], + "type": "text", + "content": "Hendrix" + } + ] + } + ], + "index": 143 + }, + { + "bbox": [ + 389, + 346, + 439, + 356 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 346, + 439, + 356 + ], + "spans": [ + { + "bbox": [ + 389, + 346, + 439, + 356 + ], + "type": "text", + "content": "Hengzhi Pei" + } + ] + } + ], + "index": 144 + }, + { + "bbox": [ + 389, + 357, + 451, + 366 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 357, + 451, + 366 + ], + "spans": [ + { + "bbox": [ + 389, + 357, + 451, + 366 + ], + "type": "text", + "content": "Henrik Nielsen" + } + ] + } + ], + "index": 145 + }, + { + "bbox": [ + 389, + 368, + 436, + 377 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 368, + 436, + 377 + ], + "spans": [ + { + "bbox": [ + 389, + 368, + 436, + 377 + ], + "type": "text", + "content": "Hesam Ilati" + } + ] + } + ], + "index": 146 + }, + { + "bbox": [ + 389, + 379, + 454, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 379, + 454, + 388 + ], + "spans": [ + { + "bbox": [ + 389, + 379, + 454, + 388 + ], + "type": "text", + "content": "Himanshu Patel" + } + ] + } + ], + "index": 147 + }, + { + "bbox": [ + 389, + 389, + 442, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 389, + 442, + 399 + ], + "spans": [ + { + "bbox": [ + 389, + 389, + 442, + 399 + ], + "type": "text", + "content": "Hongshan Li" + } + ] + } + ], + "index": 148 + }, + { + "bbox": [ + 389, + 401, + 448, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 401, + 448, + 411 + ], + "spans": [ + { + "bbox": [ + 389, + 401, + 448, + 411 + ], + "type": "text", + "content": 
"Hongzhou Lin" + } + ] + } + ], + "index": 149 + }, + { + "bbox": [ + 389, + 411, + 444, + 421 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 411, + 444, + 421 + ], + "spans": [ + { + "bbox": [ + 389, + 411, + 444, + 421 + ], + "type": "text", + "content": "Hussain Raza" + } + ] + } + ], + "index": 150 + }, + { + "bbox": [ + 389, + 422, + 440, + 431 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 422, + 440, + 431 + ], + "spans": [ + { + "bbox": [ + 389, + 422, + 440, + 431 + ], + "type": "text", + "content": "Ian Cullinan" + } + ] + } + ], + "index": 151 + }, + { + "bbox": [ + 389, + 434, + 429, + 442 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 434, + 429, + 442 + ], + "spans": [ + { + "bbox": [ + 389, + 434, + 429, + 442 + ], + "type": "text", + "content": "Imre Kiss" + } + ] + } + ], + "index": 152 + }, + { + "bbox": [ + 389, + 445, + 481, + 454 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 445, + 481, + 454 + ], + "spans": [ + { + "bbox": [ + 389, + 445, + 481, + 454 + ], + "type": "text", + "content": "Inbarasan Thangamani" + } + ] + } + ], + "index": 153 + }, + { + "bbox": [ + 389, + 456, + 466, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 456, + 466, + 464 + ], + "spans": [ + { + "bbox": [ + 389, + 456, + 466, + 464 + ], + "type": "text", + "content": "Indrayani Fadnavis" + } + ] + } + ], + "index": 154 + }, + { + "bbox": [ + 389, + 466, + 477, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 466, + 477, + 475 + ], + "spans": [ + { + "bbox": [ + 389, + 466, + 477, + 475 + ], + "type": "text", + "content": "Ionut Teodor Sorodoc" + } + ] + } + ], + "index": 155 + }, + { + "bbox": [ + 389, + 477, + 441, + 486 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 477, + 441, + 486 + ], + "spans": [ + { + "bbox": [ + 389, + 477, + 441, + 486 + ], + "type": "text", + "content": "Irem Ertuerk" + } + ] + } + ], + "index": 156 + }, + { + "bbox": [ + 389, + 488, + 470, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 488, + 470, + 498 + ], + "spans": [ + { + "bbox": [ + 389, + 488, + 470, + 498 + ], + "type": "text", + "content": "Iryna Yemialyanava" + } + ] + } + ], + "index": 157 + }, + { + "bbox": [ + 389, + 499, + 432, + 508 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 499, + 432, + 508 + ], + "spans": [ + { + "bbox": [ + 389, + 499, + 432, + 508 + ], + "type": "text", + "content": "Ishan Soni" + } + ] + } + ], + "index": 158 + }, + { + "bbox": [ + 389, + 510, + 435, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 510, + 435, + 518 + ], + "spans": [ + { + "bbox": [ + 389, + 510, + 435, + 518 + ], + "type": "text", + "content": "Ismail Jelal" + } + ] + } + ], + "index": 159 + }, + { + "bbox": [ + 389, + 521, + 425, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 521, + 425, + 529 + ], + "spans": [ + { + "bbox": [ + 389, + 521, + 425, + 529 + ], + "type": "text", + "content": "Ivan Tse" + } + ] + } + ], + "index": 160 + }, + { + "bbox": [ + 389, + 532, + 453, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 532, + 453, + 540 + ], + "spans": [ + { + "bbox": [ + 389, + 532, + 453, + 540 + ], + "type": "text", + "content": "Jack FitzGerald" + } + ] + } + ], + "index": 161 + }, + { + "bbox": [ + 389, + 543, + 431, + 552 + ], + "type": 
"text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 543, + 431, + 552 + ], + "spans": [ + { + "bbox": [ + 389, + 543, + 431, + 552 + ], + "type": "text", + "content": "Jack Zhao" + } + ] + } + ], + "index": 162 + }, + { + "bbox": [ + 389, + 554, + 458, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 554, + 458, + 563 + ], + "spans": [ + { + "bbox": [ + 389, + 554, + 458, + 563 + ], + "type": "text", + "content": "Jackson Rothgeb" + } + ] + } + ], + "index": 163 + }, + { + "bbox": [ + 389, + 564, + 430, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 564, + 430, + 574 + ], + "spans": [ + { + "bbox": [ + 389, + 564, + 430, + 574 + ], + "type": "text", + "content": "Jacky Lee" + } + ] + } + ], + "index": 164 + }, + { + "bbox": [ + 389, + 575, + 429, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 575, + 429, + 586 + ], + "spans": [ + { + "bbox": [ + 389, + 575, + 429, + 586 + ], + "type": "text", + "content": "Jake Jung" + } + ] + } + ], + "index": 165 + }, + { + "bbox": [ + 389, + 586, + 444, + 595 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 586, + 444, + 595 + ], + "spans": [ + { + "bbox": [ + 389, + 586, + 444, + 595 + ], + "type": "text", + "content": "Jakub Debski" + } + ] + } + ], + "index": 166 + }, + { + "bbox": [ + 389, + 597, + 452, + 606 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 597, + 452, + 606 + ], + "spans": [ + { + "bbox": [ + 389, + 597, + 452, + 606 + ], + "type": "text", + "content": "Jakub Tomczak" + } + ] + } + ], + "index": 167 + }, + { + "bbox": [ + 389, + 608, + 435, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 608, + 435, + 616 + ], + "spans": [ + { + "bbox": [ + 389, + 608, + 435, + 616 + ], + "type": "text", + "content": "James Jeun" + } + ] + } + ], + "index": 168 + }, + { + "bbox": [ + 389, + 619, + 448, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 619, + 448, + 628 + ], + "spans": [ + { + "bbox": [ + 389, + 619, + 448, + 628 + ], + "type": "text", + "content": "James Sanders" + } + ] + } + ], + "index": 169 + }, + { + "bbox": [ + 389, + 630, + 448, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 630, + 448, + 640 + ], + "spans": [ + { + "bbox": [ + 389, + 630, + 448, + 640 + ], + "type": "text", + "content": "Jason Crowley" + } + ] + } + ], + "index": 170 + }, + { + "bbox": [ + 389, + 641, + 421, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 641, + 421, + 651 + ], + "spans": [ + { + "bbox": [ + 389, + 641, + 421, + 651 + ], + "type": "text", + "content": "Jay Lee" + } + ] + } + ], + "index": 171 + }, + { + "bbox": [ + 389, + 651, + 494, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 651, + 494, + 662 + ], + "spans": [ + { + "bbox": [ + 389, + 651, + 494, + 662 + ], + "type": "text", + "content": "Jayakrishna Anvesh Paidy" + } + ] + } + ], + "index": 172 + }, + { + "bbox": [ + 389, + 662, + 444, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 662, + 444, + 673 + ], + "spans": [ + { + "bbox": [ + 389, + 662, + 444, + 673 + ], + "type": "text", + "content": "Jayant Tiwari" + } + ] + } + ], + "index": 173 + }, + { + "bbox": [ + 389, + 673, + 439, + 682 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 673, + 439, + 682 + ], + "spans": [ + { + "bbox": [ + 389, + 673, + 
439, + 682 + ], + "type": "text", + "content": "Jean Farmer" + } + ] + } + ], + "index": 174 + }, + { + "bbox": [ + 389, + 685, + 442, + 694 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 685, + 442, + 694 + ], + "spans": [ + { + "bbox": [ + 389, + 685, + 442, + 694 + ], + "type": "text", + "content": "Jeff Solinsky" + } + ] + } + ], + "index": 175 + }, + { + "bbox": [ + 389, + 696, + 430, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 696, + 430, + 704 + ], + "spans": [ + { + "bbox": [ + 389, + 696, + 430, + 704 + ], + "type": "text", + "content": "Jenna Lau" + } + ] + } + ], + "index": 176 + }, + { + "bbox": [ + 389, + 706, + 461, + 716 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 706, + 461, + 716 + ], + "spans": [ + { + "bbox": [ + 389, + 706, + 461, + 716 + ], + "type": "text", + "content": "Jeremy Savareese" + } + ] + } + ], + "index": 177 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "spans": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "text", + "content": "The Amazon Nova Family of Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 297, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 297, + 741, + 309, + 750 + ], + "type": "text", + "content": "44" + } + ] + } + ], + "index": 178 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 43 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 72, + 130, + 83 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 72, + 130, + 83 + ], + "spans": [ + { + "bbox": [ + 69, + 72, + 130, + 83 + ], + "type": "text", + "content": "Jerzy Zagorski" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 70, + 84, + 95, + 93 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 84, + 95, + 93 + ], + "spans": [ + { + "bbox": [ + 70, + 84, + 95, + 93 + ], + "type": "text", + "content": "Ji Dai" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 95, + 141, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 95, + 141, + 106 + ], + "spans": [ + { + "bbox": [ + 70, + 95, + 141, + 106 + ], + "type": "text", + "content": "Jiacheng (JC) Gu" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 106, + 107, + 115 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 106, + 107, + 115 + ], + "spans": [ + { + "bbox": [ + 70, + 106, + 107, + 115 + ], + "type": "text", + "content": "Jiahui Li" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 117, + 151, + 128 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 117, + 151, + 128 + ], + "spans": [ + { + "bbox": [ + 70, + 117, + 151, + 128 + ], + "type": "text", + "content": "Jian (Skyler) Zheng" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 129, + 116, + 137 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 129, + 116, + 137 + ], + "spans": [ + { + "bbox": [ + 70, + 129, + 116, + 137 + ], + "type": "text", + "content": "Jianhua Lu" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 139, + 127, + 149 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 139, + 127, + 149 + ], + "spans": [ + { + "bbox": [ + 70, + 139, + 127, + 149 + ], + "type": "text", + "content": "Jianhua 
Wang" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 150, + 113, + 159 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 150, + 113, + 159 + ], + "spans": [ + { + "bbox": [ + 70, + 150, + 113, + 159 + ], + "type": "text", + "content": "Jiawei Dai" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 70, + 160, + 113, + 169 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 160, + 113, + 169 + ], + "spans": [ + { + "bbox": [ + 70, + 160, + 113, + 169 + ], + "type": "text", + "content": "Jiawei Mo" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 70, + 171, + 105, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 171, + 105, + 180 + ], + "spans": [ + { + "bbox": [ + 70, + 171, + 105, + 180 + ], + "type": "text", + "content": "Jiaxi Xu" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 70, + 182, + 108, + 193 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 182, + 108, + 193 + ], + "spans": [ + { + "bbox": [ + 70, + 182, + 108, + 193 + ], + "type": "text", + "content": "Jie Liang" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 70, + 194, + 106, + 204 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 194, + 106, + 204 + ], + "spans": [ + { + "bbox": [ + 70, + 194, + 106, + 204 + ], + "type": "text", + "content": "Jie Yang" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 70, + 205, + 113, + 214 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 205, + 113, + 214 + ], + "spans": [ + { + "bbox": [ + 70, + 205, + 113, + 214 + ], + "type": "text", + "content": "Jim Logan" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 70, + 215, + 135, + 225 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 215, + 135, + 225 + ], + "spans": [ + { + "bbox": [ + 70, + 215, + 135, + 225 + ], + "type": "text", + "content": "Jimit Majmudar" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 70, + 226, + 104, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 226, + 104, + 236 + ], + "spans": [ + { + "bbox": [ + 70, + 226, + 104, + 236 + ], + "type": "text", + "content": "Jing Liu" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 70, + 237, + 131, + 247 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 237, + 131, + 247 + ], + "spans": [ + { + "bbox": [ + 70, + 237, + 131, + 247 + ], + "type": "text", + "content": "Jinghong Miao" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 70, + 248, + 108, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 248, + 108, + 258 + ], + "spans": [ + { + "bbox": [ + 70, + 248, + 108, + 258 + ], + "type": "text", + "content": "Jingru Yi" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 70, + 259, + 121, + 269 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 259, + 121, + 269 + ], + "spans": [ + { + "bbox": [ + 70, + 259, + 121, + 269 + ], + "type": "text", + "content": "Jingyang Jin" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 70, + 270, + 121, + 279 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 270, + 121, + 279 + ], + "spans": [ + { + "bbox": [ + 70, + 270, + 121, + 279 + ], + "type": "text", + "content": "Jiun-Yu Kao" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 70, + 281, + 123, + 291 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 281, + 123, + 291 + ], + "spans": [ + { + "bbox": [ 
+ 70, + 281, + 123, + 291 + ], + "type": "text", + "content": "Jixuan Wang" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 70, + 293, + 123, + 302 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 293, + 123, + 302 + ], + "spans": [ + { + "bbox": [ + 70, + 293, + 123, + 302 + ], + "type": "text", + "content": "Jiyang Wang" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 70, + 303, + 130, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 303, + 130, + 312 + ], + "spans": [ + { + "bbox": [ + 70, + 303, + 130, + 312 + ], + "type": "text", + "content": "Joe Pemberton" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 70, + 314, + 120, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 314, + 120, + 323 + ], + "spans": [ + { + "bbox": [ + 70, + 314, + 120, + 323 + ], + "type": "text", + "content": "Joel Carlson" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 70, + 324, + 126, + 334 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 324, + 126, + 334 + ], + "spans": [ + { + "bbox": [ + 70, + 324, + 126, + 334 + ], + "type": "text", + "content": "Joey Blundell" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 70, + 335, + 130, + 345 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 335, + 130, + 345 + ], + "spans": [ + { + "bbox": [ + 70, + 335, + 130, + 345 + ], + "type": "text", + "content": "John Chin-Jew" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 70, + 346, + 104, + 355 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 346, + 104, + 355 + ], + "spans": [ + { + "bbox": [ + 70, + 346, + 104, + 355 + ], + "type": "text", + "content": "John He" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 70, + 357, + 121, + 367 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 357, + 121, + 367 + ], + "spans": [ + { + "bbox": [ + 70, + 357, + 121, + 367 + ], + "type": "text", + "content": "Jonathan Ho" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 70, + 369, + 138, + 377 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 369, + 138, + 377 + ], + "spans": [ + { + "bbox": [ + 70, + 369, + 138, + 377 + ], + "type": "text", + "content": "Jonathan Hueser" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 70, + 379, + 128, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 379, + 128, + 388 + ], + "spans": [ + { + "bbox": [ + 70, + 379, + 128, + 388 + ], + "type": "text", + "content": "Jonathan Lunt" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 70, + 390, + 127, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 390, + 127, + 399 + ], + "spans": [ + { + "bbox": [ + 70, + 390, + 127, + 399 + ], + "type": "text", + "content": "Jooyoung Lee" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 70, + 401, + 116, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 401, + 116, + 411 + ], + "spans": [ + { + "bbox": [ + 70, + 401, + 116, + 411 + ], + "type": "text", + "content": "Joshua Tan" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 70, + 412, + 137, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 412, + 137, + 422 + ], + "spans": [ + { + "bbox": [ + 70, + 412, + 137, + 422 + ], + "type": "text", + "content": "Joyjit Chatterjee" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 70, + 423, + 130, + 433 + ], + "type": "text", + 
"angle": 0, + "lines": [ + { + "bbox": [ + 70, + 423, + 130, + 433 + ], + "spans": [ + { + "bbox": [ + 70, + 423, + 130, + 433 + ], + "type": "text", + "content": "Judith Gaspers" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 70, + 434, + 110, + 444 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 434, + 110, + 444 + ], + "spans": [ + { + "bbox": [ + 70, + 434, + 110, + 444 + ], + "type": "text", + "content": "Jue Wang" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 70, + 445, + 107, + 455 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 445, + 107, + 455 + ], + "spans": [ + { + "bbox": [ + 70, + 445, + 107, + 455 + ], + "type": "text", + "content": "Jun Fang" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 70, + 456, + 107, + 465 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 456, + 107, + 465 + ], + "spans": [ + { + "bbox": [ + 70, + 456, + 107, + 465 + ], + "type": "text", + "content": "Jun Tang" + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 70, + 467, + 106, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 467, + 106, + 475 + ], + "spans": [ + { + "bbox": [ + 70, + 467, + 106, + 475 + ], + "type": "text", + "content": "Jun Wan" + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 70, + 477, + 102, + 486 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 477, + 102, + 486 + ], + "spans": [ + { + "bbox": [ + 70, + 477, + 102, + 486 + ], + "type": "text", + "content": "Jun Wu" + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 70, + 488, + 121, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 488, + 121, + 498 + ], + "spans": [ + { + "bbox": [ + 70, + 488, + 121, + 498 + ], + "type": "text", + "content": "Junlei Wang" + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 70, + 499, + 109, + 509 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 499, + 109, + 509 + ], + "spans": [ + { + "bbox": [ + 70, + 499, + 109, + 509 + ], + "type": "text", + "content": "Junyi Shi" + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 70, + 510, + 116, + 519 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 510, + 116, + 519 + ], + "spans": [ + { + "bbox": [ + 70, + 510, + 116, + 519 + ], + "type": "text", + "content": "Justin Chiu" + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 70, + 521, + 130, + 530 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 521, + 130, + 530 + ], + "spans": [ + { + "bbox": [ + 70, + 521, + 130, + 530 + ], + "type": "text", + "content": "Justin Satriano" + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 70, + 532, + 112, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 532, + 112, + 540 + ], + "spans": [ + { + "bbox": [ + 70, + 532, + 112, + 540 + ], + "type": "text", + "content": "Justin Yee" + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 70, + 543, + 132, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 543, + 132, + 552 + ], + "spans": [ + { + "bbox": [ + 70, + 543, + 132, + 552 + ], + "type": "text", + "content": "Jwala Dhamala" + } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 70, + 554, + 120, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 554, + 120, + 563 + ], + "spans": [ + { + "bbox": [ + 70, + 554, + 120, + 563 + ], + "type": "text", + "content": "Jyoti Bansal" + } + ] + } + ], + "index": 
45 + }, + { + "bbox": [ + 70, + 564, + 108, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 564, + 108, + 574 + ], + "spans": [ + { + "bbox": [ + 70, + 564, + 108, + 574 + ], + "type": "text", + "content": "Kai Zhen" + } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 70, + 575, + 133, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 575, + 133, + 586 + ], + "spans": [ + { + "bbox": [ + 70, + 575, + 133, + 586 + ], + "type": "text", + "content": "Kai-Wei Chang" + } + ] + } + ], + "index": 47 + }, + { + "bbox": [ + 70, + 586, + 124, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 586, + 124, + 597 + ], + "spans": [ + { + "bbox": [ + 70, + 586, + 124, + 597 + ], + "type": "text", + "content": "Kaixiang Lin" + } + ] + } + ], + "index": 48 + }, + { + "bbox": [ + 70, + 597, + 130, + 607 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 597, + 130, + 607 + ], + "spans": [ + { + "bbox": [ + 70, + 597, + 130, + 607 + ], + "type": "text", + "content": "Kalyan Raman" + } + ] + } + ], + "index": 49 + }, + { + "bbox": [ + 70, + 609, + 201, + 618 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 609, + 201, + 618 + ], + "spans": [ + { + "bbox": [ + 70, + 609, + 201, + 618 + ], + "type": "text", + "content": "Kanthashree Mysore Sathyendra" + } + ] + } + ], + "index": 50 + }, + { + "bbox": [ + 70, + 619, + 130, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 619, + 130, + 628 + ], + "spans": [ + { + "bbox": [ + 70, + 619, + 130, + 628 + ], + "type": "text", + "content": "Karabo Moroe" + } + ] + } + ], + "index": 51 + }, + { + "bbox": [ + 70, + 630, + 145, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 630, + 145, + 639 + ], + "spans": [ + { + "bbox": [ + 70, + 630, + 145, + 639 + ], + "type": "text", + "content": "Karan Bhandarkar" + } + ] + } + ], + "index": 52 + }, + { + "bbox": [ + 70, + 641, + 128, + 650 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 641, + 128, + 650 + ], + "spans": [ + { + "bbox": [ + 70, + 641, + 128, + 650 + ], + "type": "text", + "content": "Karan Kothari" + } + ] + } + ], + "index": 53 + }, + { + "bbox": [ + 70, + 652, + 154, + 661 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 652, + 154, + 661 + ], + "spans": [ + { + "bbox": [ + 70, + 652, + 154, + 661 + ], + "type": "text", + "content": "Karolina Owczarzak" + } + ] + } + ], + "index": 54 + }, + { + "bbox": [ + 70, + 662, + 161, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 662, + 161, + 673 + ], + "spans": [ + { + "bbox": [ + 70, + 662, + 161, + 673 + ], + "type": "text", + "content": "Karthick Gopalswamy" + } + ] + } + ], + "index": 55 + }, + { + "bbox": [ + 70, + 673, + 127, + 682 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 673, + 127, + 682 + ], + "spans": [ + { + "bbox": [ + 70, + 673, + 127, + 682 + ], + "type": "text", + "content": "Karthick Ravi" + } + ] + } + ], + "index": 56 + }, + { + "bbox": [ + 70, + 685, + 162, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 685, + 162, + 693 + ], + "spans": [ + { + "bbox": [ + 70, + 685, + 162, + 693 + ], + "type": "text", + "content": "Karthik Ramakrishnan" + } + ] + } + ], + "index": 57 + }, + { + "bbox": [ + 70, + 696, + 154, + 705 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 696, + 154, + 705 
+ ], + "spans": [ + { + "bbox": [ + 70, + 696, + 154, + 705 + ], + "type": "text", + "content": "Karthika Arumugam" + } + ] + } + ], + "index": 58 + }, + { + "bbox": [ + 70, + 706, + 125, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 706, + 125, + 715 + ], + "spans": [ + { + "bbox": [ + 70, + 706, + 125, + 715 + ], + "type": "text", + "content": "Kartik Mehta" + } + ] + } + ], + "index": 59 + }, + { + "bbox": [ + 230, + 73, + 321, + 83 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 73, + 321, + 83 + ], + "spans": [ + { + "bbox": [ + 230, + 73, + 321, + 83 + ], + "type": "text", + "content": "Katarzyna Konczalska" + } + ] + } + ], + "index": 60 + }, + { + "bbox": [ + 230, + 84, + 303, + 94 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 84, + 303, + 94 + ], + "spans": [ + { + "bbox": [ + 230, + 84, + 303, + 94 + ], + "type": "text", + "content": "Kavya Ravikumar" + } + ] + } + ], + "index": 61 + }, + { + "bbox": [ + 230, + 95, + 263, + 104 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 95, + 263, + 104 + ], + "spans": [ + { + "bbox": [ + 230, + 95, + 263, + 104 + ], + "type": "text", + "content": "Ke Tran" + } + ] + } + ], + "index": 62 + }, + { + "bbox": [ + 230, + 106, + 278, + 116 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 106, + 278, + 116 + ], + "spans": [ + { + "bbox": [ + 230, + 106, + 278, + 116 + ], + "type": "text", + "content": "Kochen Qin" + } + ] + } + ], + "index": 63 + }, + { + "bbox": [ + 230, + 118, + 264, + 125 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 118, + 264, + 125 + ], + "spans": [ + { + "bbox": [ + 230, + 118, + 264, + 125 + ], + "type": "text", + "content": "Kelin Li" + } + ] + } + ], + "index": 64 + }, + { + "bbox": [ + 230, + 128, + 269, + 137 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 128, + 269, + 137 + ], + "spans": [ + { + "bbox": [ + 230, + 128, + 269, + 137 + ], + "type": "text", + "content": "Kelvin Li" + } + ] + } + ], + "index": 65 + }, + { + "bbox": [ + 230, + 139, + 292, + 148 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 139, + 292, + 148 + ], + "spans": [ + { + "bbox": [ + 230, + 139, + 292, + 148 + ], + "type": "text", + "content": "Ketan Kulkarni" + } + ] + } + ], + "index": 66 + }, + { + "bbox": [ + 230, + 150, + 330, + 160 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 150, + 330, + 160 + ], + "spans": [ + { + "bbox": [ + 230, + 150, + 330, + 160 + ], + "type": "text", + "content": "Kevin Angelo Rodrigues" + } + ] + } + ], + "index": 67 + }, + { + "bbox": [ + 230, + 161, + 277, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 161, + 277, + 171 + ], + "spans": [ + { + "bbox": [ + 230, + 161, + 277, + 171 + ], + "type": "text", + "content": "Keyur Patel" + } + ] + } + ], + "index": 68 + }, + { + "bbox": [ + 230, + 171, + 299, + 181 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 171, + 299, + 181 + ], + "spans": [ + { + "bbox": [ + 230, + 171, + 299, + 181 + ], + "type": "text", + "content": "Khadige Abboud" + } + ] + } + ], + "index": 69 + }, + { + "bbox": [ + 230, + 182, + 283, + 193 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 182, + 283, + 193 + ], + "spans": [ + { + "bbox": [ + 230, + 182, + 283, + 193 + ], + "type": "text", + "content": "Kiana Hajebi" + } + ] + } + ], + "index": 70 + }, + 
{ + "bbox": [ + 230, + 194, + 281, + 202 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 194, + 281, + 202 + ], + "spans": [ + { + "bbox": [ + 230, + 194, + 281, + 202 + ], + "type": "text", + "content": "Klaus Reiter" + } + ] + } + ], + "index": 71 + }, + { + "bbox": [ + 230, + 204, + 280, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 204, + 280, + 213 + ], + "spans": [ + { + "bbox": [ + 230, + 204, + 280, + 213 + ], + "type": "text", + "content": "Kris Schultz" + } + ] + } + ], + "index": 72 + }, + { + "bbox": [ + 230, + 215, + 299, + 225 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 215, + 299, + 225 + ], + "spans": [ + { + "bbox": [ + 230, + 215, + 299, + 225 + ], + "type": "text", + "content": "Krishna Anisetty" + } + ] + } + ], + "index": 73 + }, + { + "bbox": [ + 230, + 226, + 298, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 226, + 298, + 236 + ], + "spans": [ + { + "bbox": [ + 230, + 226, + 298, + 236 + ], + "type": "text", + "content": "Krishna Kotnana" + } + ] + } + ], + "index": 74 + }, + { + "bbox": [ + 230, + 237, + 272, + 246 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 237, + 272, + 246 + ], + "spans": [ + { + "bbox": [ + 230, + 237, + 272, + 246 + ], + "type": "text", + "content": "Kristen Li" + } + ] + } + ], + "index": 75 + }, + { + "bbox": [ + 230, + 248, + 340, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 248, + 340, + 258 + ], + "spans": [ + { + "bbox": [ + 230, + 248, + 340, + 258 + ], + "type": "text", + "content": "Kruthi Channamallikarjuna" + } + ] + } + ], + "index": 76 + }, + { + "bbox": [ + 230, + 259, + 315, + 269 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 259, + 315, + 269 + ], + "spans": [ + { + "bbox": [ + 230, + 259, + 315, + 269 + ], + "type": "text", + "content": "Krzysztof Jakubczyk" + } + ] + } + ], + "index": 77 + }, + { + "bbox": [ + 230, + 270, + 290, + 280 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 270, + 290, + 280 + ], + "spans": [ + { + "bbox": [ + 230, + 270, + 290, + 280 + ], + "type": "text", + "content": "Kuba Pierewoj" + } + ] + } + ], + "index": 78 + }, + { + "bbox": [ + 230, + 281, + 270, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 281, + 270, + 289 + ], + "spans": [ + { + "bbox": [ + 230, + 281, + 270, + 289 + ], + "type": "text", + "content": "Kunal Pal" + } + ] + } + ], + "index": 79 + }, + { + "bbox": [ + 230, + 292, + 302, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 292, + 302, + 300 + ], + "spans": [ + { + "bbox": [ + 230, + 292, + 302, + 300 + ], + "type": "text", + "content": "Kunwar Srivastav" + } + ] + } + ], + "index": 80 + }, + { + "bbox": [ + 230, + 303, + 298, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 303, + 298, + 312 + ], + "spans": [ + { + "bbox": [ + 230, + 303, + 298, + 312 + ], + "type": "text", + "content": "Kyle Bannerman" + } + ] + } + ], + "index": 81 + }, + { + "bbox": [ + 230, + 313, + 288, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 313, + 288, + 323 + ], + "spans": [ + { + "bbox": [ + 230, + 313, + 288, + 323 + ], + "type": "text", + "content": "Lahari Poddar" + } + ] + } + ], + "index": 82 + }, + { + "bbox": [ + 230, + 324, + 294, + 334 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 
324, + 294, + 334 + ], + "spans": [ + { + "bbox": [ + 230, + 324, + 294, + 334 + ], + "type": "text", + "content": "Lakshmi Prasad" + } + ] + } + ], + "index": 83 + }, + { + "bbox": [ + 230, + 335, + 280, + 345 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 335, + 280, + 345 + ], + "spans": [ + { + "bbox": [ + 230, + 335, + 280, + 345 + ], + "type": "text", + "content": "Larry Tseng" + } + ] + } + ], + "index": 84 + }, + { + "bbox": [ + 230, + 346, + 296, + 356 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 346, + 296, + 356 + ], + "spans": [ + { + "bbox": [ + 230, + 346, + 296, + 356 + ], + "type": "text", + "content": "Laxmikant Naik" + } + ] + } + ], + "index": 85 + }, + { + "bbox": [ + 230, + 357, + 342, + 366 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 357, + 342, + 366 + ], + "spans": [ + { + "bbox": [ + 230, + 357, + 342, + 366 + ], + "type": "text", + "content": "Leena Chennuru Vankadara" + } + ] + } + ], + "index": 86 + }, + { + "bbox": [ + 230, + 369, + 294, + 377 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 369, + 294, + 377 + ], + "spans": [ + { + "bbox": [ + 230, + 369, + 294, + 377 + ], + "type": "text", + "content": "Lenon Minorics" + } + ] + } + ], + "index": 87 + }, + { + "bbox": [ + 230, + 379, + 263, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 379, + 263, + 388 + ], + "spans": [ + { + "bbox": [ + 230, + 379, + 263, + 388 + ], + "type": "text", + "content": "Leo Liu" + } + ] + } + ], + "index": 88 + }, + { + "bbox": [ + 230, + 390, + 295, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 390, + 295, + 399 + ], + "spans": [ + { + "bbox": [ + 230, + 390, + 295, + 399 + ], + "type": "text", + "content": "Leonard Lausen" + } + ] + } + ], + "index": 89 + }, + { + "bbox": [ + 230, + 401, + 323, + 410 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 401, + 323, + 410 + ], + "spans": [ + { + "bbox": [ + 230, + 401, + 323, + 410 + ], + "type": "text", + "content": "Leonardo F. R. 
Ribeiro" + } + ] + } + ], + "index": 90 + }, + { + "bbox": [ + 230, + 412, + 268, + 421 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 412, + 268, + 421 + ], + "spans": [ + { + "bbox": [ + 230, + 412, + 268, + 421 + ], + "type": "text", + "content": "Li Zhang" + } + ] + } + ], + "index": 91 + }, + { + "bbox": [ + 230, + 423, + 289, + 431 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 423, + 289, + 431 + ], + "spans": [ + { + "bbox": [ + 230, + 423, + 289, + 431 + ], + "type": "text", + "content": "Lili Gehorsam" + } + ] + } + ], + "index": 92 + }, + { + "bbox": [ + 230, + 434, + 262, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 434, + 262, + 443 + ], + "spans": [ + { + "bbox": [ + 230, + 434, + 262, + 443 + ], + "type": "text", + "content": "Ling Qi" + } + ] + } + ], + "index": 93 + }, + { + "bbox": [ + 230, + 445, + 274, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 445, + 274, + 453 + ], + "spans": [ + { + "bbox": [ + 230, + 445, + 274, + 453 + ], + "type": "text", + "content": "Lisa Bauer" + } + ] + } + ], + "index": 94 + }, + { + "bbox": [ + 230, + 456, + 277, + 465 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 456, + 277, + 465 + ], + "spans": [ + { + "bbox": [ + 230, + 456, + 277, + 465 + ], + "type": "text", + "content": "Lori Knapp" + } + ] + } + ], + "index": 95 + }, + { + "bbox": [ + 230, + 468, + 264, + 476 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 468, + 264, + 476 + ], + "spans": [ + { + "bbox": [ + 230, + 468, + 264, + 476 + ], + "type": "text", + "content": "Lu Zeng" + } + ] + } + ], + "index": 96 + }, + { + "bbox": [ + 230, + 478, + 277, + 487 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 478, + 277, + 487 + ], + "spans": [ + { + "bbox": [ + 230, + 478, + 277, + 487 + ], + "type": "text", + "content": "Lucas Tong" + } + ] + } + ], + "index": 97 + }, + { + "bbox": [ + 230, + 489, + 276, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 489, + 276, + 498 + ], + "spans": [ + { + "bbox": [ + 230, + 489, + 276, + 498 + ], + "type": "text", + "content": "Lulu Wong" + } + ] + } + ], + "index": 98 + }, + { + "bbox": [ + 230, + 500, + 283, + 508 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 500, + 283, + 508 + ], + "spans": [ + { + "bbox": [ + 230, + 500, + 283, + 508 + ], + "type": "text", + "content": "Luoxin Chen" + } + ] + } + ], + "index": 99 + }, + { + "bbox": [ + 230, + 510, + 297, + 520 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 510, + 297, + 520 + ], + "spans": [ + { + "bbox": [ + 230, + 510, + 297, + 520 + ], + "type": "text", + "content": "Maciej Rudnicki" + } + ] + } + ], + "index": 100 + }, + { + "bbox": [ + 230, + 521, + 301, + 530 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 521, + 301, + 530 + ], + "spans": [ + { + "bbox": [ + 230, + 521, + 301, + 530 + ], + "type": "text", + "content": "Mahdi Namazifar" + } + ] + } + ], + "index": 101 + }, + { + "bbox": [ + 230, + 532, + 308, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 532, + 308, + 540 + ], + "spans": [ + { + "bbox": [ + 230, + 532, + 308, + 540 + ], + "type": "text", + "content": "Mahesh Jaliminche" + } + ] + } + ], + "index": 102 + }, + { + "bbox": [ + 230, + 543, + 314, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 
230, + 543, + 314, + 552 + ], + "spans": [ + { + "bbox": [ + 230, + 543, + 314, + 552 + ], + "type": "text", + "content": "Maira Ladeira Tanke" + } + ] + } + ], + "index": 103 + }, + { + "bbox": [ + 230, + 554, + 287, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 554, + 287, + 563 + ], + "spans": [ + { + "bbox": [ + 230, + 554, + 287, + 563 + ], + "type": "text", + "content": "Manasi Gupta" + } + ] + } + ], + "index": 104 + }, + { + "bbox": [ + 230, + 564, + 304, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 564, + 304, + 574 + ], + "spans": [ + { + "bbox": [ + 230, + 564, + 304, + 574 + ], + "type": "text", + "content": "Mandeep Ahlawat" + } + ] + } + ], + "index": 105 + }, + { + "bbox": [ + 230, + 575, + 288, + 585 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 575, + 288, + 585 + ], + "spans": [ + { + "bbox": [ + 230, + 575, + 288, + 585 + ], + "type": "text", + "content": "Mani Khanuja" + } + ] + } + ], + "index": 106 + }, + { + "bbox": [ + 230, + 586, + 294, + 596 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 586, + 294, + 596 + ], + "spans": [ + { + "bbox": [ + 230, + 586, + 294, + 596 + ], + "type": "text", + "content": "Mani Sundaram" + } + ] + } + ], + "index": 107 + }, + { + "bbox": [ + 230, + 597, + 282, + 607 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 597, + 282, + 607 + ], + "spans": [ + { + "bbox": [ + 230, + 597, + 282, + 607 + ], + "type": "text", + "content": "Marcin Leyk" + } + ] + } + ], + "index": 108 + }, + { + "bbox": [ + 230, + 609, + 306, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 609, + 306, + 617 + ], + "spans": [ + { + "bbox": [ + 230, + 609, + 306, + 617 + ], + "type": "text", + "content": "Mariusz Momotko" + } + ] + } + ], + "index": 109 + }, + { + "bbox": [ + 230, + 619, + 288, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 619, + 288, + 628 + ], + "spans": [ + { + "bbox": [ + 230, + 619, + 288, + 628 + ], + "type": "text", + "content": "Markus Boese" + } + ] + } + ], + "index": 110 + }, + { + "bbox": [ + 230, + 630, + 291, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 630, + 291, + 639 + ], + "spans": [ + { + "bbox": [ + 230, + 630, + 291, + 639 + ], + "type": "text", + "content": "Markus Dreyer" + } + ] + } + ], + "index": 111 + }, + { + "bbox": [ + 230, + 641, + 296, + 650 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 641, + 296, + 650 + ], + "spans": [ + { + "bbox": [ + 230, + 641, + 296, + 650 + ], + "type": "text", + "content": "Markus Mueller" + } + ] + } + ], + "index": 112 + }, + { + "bbox": [ + 230, + 652, + 271, + 661 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 652, + 271, + 661 + ], + "spans": [ + { + "bbox": [ + 230, + 652, + 271, + 661 + ], + "type": "text", + "content": "Mason Fu" + } + ] + } + ], + "index": 113 + }, + { + "bbox": [ + 230, + 662, + 294, + 672 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 662, + 294, + 672 + ], + "spans": [ + { + "bbox": [ + 230, + 662, + 294, + 672 + ], + "type": "text", + "content": "Mateusz Górski" + } + ] + } + ], + "index": 114 + }, + { + "bbox": [ + 230, + 673, + 321, + 683 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 673, + 321, + 683 + ], + "spans": [ + { + "bbox": [ + 230, + 673, + 321, + 683 + ], + "type": "text", + 
"content": "Mateusz Mastalerczyk" + } + ] + } + ], + "index": 115 + }, + { + "bbox": [ + 230, + 685, + 282, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 685, + 282, + 693 + ], + "spans": [ + { + "bbox": [ + 230, + 685, + 282, + 693 + ], + "type": "text", + "content": "Matias Mora" + } + ] + } + ], + "index": 116 + }, + { + "bbox": [ + 230, + 696, + 285, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 696, + 285, + 704 + ], + "spans": [ + { + "bbox": [ + 230, + 696, + 285, + 704 + ], + "type": "text", + "content": "Matt Johnson" + } + ] + } + ], + "index": 117 + }, + { + "bbox": [ + 230, + 706, + 272, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 706, + 272, + 715 + ], + "spans": [ + { + "bbox": [ + 230, + 706, + 272, + 715 + ], + "type": "text", + "content": "Matt Scott" + } + ] + } + ], + "index": 118 + }, + { + "bbox": [ + 389, + 73, + 446, + 83 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 73, + 446, + 83 + ], + "spans": [ + { + "bbox": [ + 389, + 73, + 446, + 83 + ], + "type": "text", + "content": "Matthew Wen" + } + ] + } + ], + "index": 119 + }, + { + "bbox": [ + 389, + 84, + 443, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 84, + 443, + 95 + ], + "spans": [ + { + "bbox": [ + 389, + 84, + 443, + 95 + ], + "type": "text", + "content": "Max Barysau" + } + ] + } + ], + "index": 120 + }, + { + "bbox": [ + 389, + 95, + 467, + 105 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 95, + 467, + 105 + ], + "spans": [ + { + "bbox": [ + 389, + 95, + 467, + 105 + ], + "type": "text", + "content": "Maya Bouerdassi" + } + ] + } + ], + "index": 121 + }, + { + "bbox": [ + 389, + 106, + 451, + 116 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 106, + 451, + 116 + ], + "spans": [ + { + "bbox": [ + 389, + 106, + 451, + 116 + ], + "type": "text", + "content": "Maya Krishnan" + } + ] + } + ], + "index": 122 + }, + { + "bbox": [ + 389, + 118, + 449, + 127 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 118, + 449, + 127 + ], + "spans": [ + { + "bbox": [ + 389, + 118, + 449, + 127 + ], + "type": "text", + "content": "Mayank Gupta" + } + ] + } + ], + "index": 123 + }, + { + "bbox": [ + 389, + 129, + 450, + 138 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 129, + 450, + 138 + ], + "spans": [ + { + "bbox": [ + 389, + 129, + 450, + 138 + ], + "type": "text", + "content": "Mayank Hirani" + } + ] + } + ], + "index": 124 + }, + { + "bbox": [ + 389, + 139, + 460, + 148 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 139, + 460, + 148 + ], + "spans": [ + { + "bbox": [ + 389, + 139, + 460, + 148 + ], + "type": "text", + "content": "Mayank Kulkarni" + } + ] + } + ], + "index": 125 + }, + { + "bbox": [ + 389, + 150, + 501, + 160 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 150, + 501, + 160 + ], + "spans": [ + { + "bbox": [ + 389, + 150, + 501, + 160 + ], + "type": "text", + "content": "Meganathan Narayanasamy" + } + ] + } + ], + "index": 126 + }, + { + "bbox": [ + 389, + 161, + 462, + 170 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 161, + 462, + 170 + ], + "spans": [ + { + "bbox": [ + 389, + 161, + 462, + 170 + ], + "type": "text", + "content": "Melanie Bradford" + } + ] + } + ], + "index": 127 + }, + { + "bbox": [ + 389, + 171, + 446, + 180 + ], + "type": 
"text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 171, + 446, + 180 + ], + "spans": [ + { + "bbox": [ + 389, + 171, + 446, + 180 + ], + "type": "text", + "content": "Melanie Gens" + } + ] + } + ], + "index": 128 + }, + { + "bbox": [ + 389, + 182, + 447, + 191 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 182, + 447, + 191 + ], + "spans": [ + { + "bbox": [ + 389, + 182, + 447, + 191 + ], + "type": "text", + "content": "Melissa Burke" + } + ] + } + ], + "index": 129 + }, + { + "bbox": [ + 389, + 194, + 427, + 203 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 194, + 427, + 203 + ], + "spans": [ + { + "bbox": [ + 389, + 194, + 427, + 203 + ], + "type": "text", + "content": "Meng Jin" + } + ] + } + ], + "index": 130 + }, + { + "bbox": [ + 389, + 205, + 434, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 205, + 434, + 213 + ], + "spans": [ + { + "bbox": [ + 389, + 205, + 434, + 213 + ], + "type": "text", + "content": "Miao Chen" + } + ] + } + ], + "index": 131 + }, + { + "bbox": [ + 389, + 215, + 470, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 215, + 470, + 224 + ], + "spans": [ + { + "bbox": [ + 389, + 215, + 470, + 224 + ], + "type": "text", + "content": "Michael Denkowski" + } + ] + } + ], + "index": 132 + }, + { + "bbox": [ + 389, + 226, + 457, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 226, + 457, + 236 + ], + "spans": [ + { + "bbox": [ + 389, + 226, + 457, + 236 + ], + "type": "text", + "content": "Michael Heymel" + } + ] + } + ], + "index": 133 + }, + { + "bbox": [ + 389, + 237, + 479, + 247 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 237, + 479, + 247 + ], + "spans": [ + { + "bbox": [ + 389, + 237, + 479, + 247 + ], + "type": "text", + "content": "Michael Krestyaninov" + } + ] + } + ], + "index": 134 + }, + { + "bbox": [ + 389, + 248, + 449, + 257 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 248, + 449, + 257 + ], + "spans": [ + { + "bbox": [ + 389, + 248, + 449, + 257 + ], + "type": "text", + "content": "Michal Obirek" + } + ] + } + ], + "index": 135 + }, + { + "bbox": [ + 389, + 259, + 488, + 268 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 259, + 488, + 268 + ], + "spans": [ + { + "bbox": [ + 389, + 259, + 488, + 268 + ], + "type": "text", + "content": "Michalina Wichorowska" + } + ] + } + ], + "index": 136 + }, + { + "bbox": [ + 389, + 270, + 445, + 278 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 270, + 445, + 278 + ], + "spans": [ + { + "bbox": [ + 389, + 270, + 445, + 278 + ], + "type": "text", + "content": "Michal Miotk" + } + ] + } + ], + "index": 137 + }, + { + "bbox": [ + 389, + 281, + 454, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 281, + 454, + 289 + ], + "spans": [ + { + "bbox": [ + 389, + 281, + 454, + 289 + ], + "type": "text", + "content": "Milosz Watroba" + } + ] + } + ], + "index": 138 + }, + { + "bbox": [ + 389, + 292, + 444, + 302 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 292, + 444, + 302 + ], + "spans": [ + { + "bbox": [ + 389, + 292, + 444, + 302 + ], + "type": "text", + "content": "Mingyi Hong" + } + ] + } + ], + "index": 139 + }, + { + "bbox": [ + 389, + 303, + 438, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 303, + 438, + 312 + ], + "spans": [ + { + "bbox": [ + 
389, + 303, + 438, + 312 + ], + "type": "text", + "content": "Mingzhi Yu" + } + ] + } + ], + "index": 140 + }, + { + "bbox": [ + 389, + 314, + 440, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 314, + 440, + 323 + ], + "spans": [ + { + "bbox": [ + 389, + 314, + 440, + 323 + ], + "type": "text", + "content": "Miranda Liu" + } + ] + } + ], + "index": 141 + }, + { + "bbox": [ + 389, + 324, + 460, + 334 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 324, + 460, + 334 + ], + "spans": [ + { + "bbox": [ + 389, + 324, + 460, + 334 + ], + "type": "text", + "content": "Mohamed Gouda" + } + ] + } + ], + "index": 142 + }, + { + "bbox": [ + 389, + 335, + 485, + 345 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 335, + 485, + 345 + ], + "spans": [ + { + "bbox": [ + 389, + 335, + 485, + 345 + ], + "type": "text", + "content": "Mohammad El-Shabani" + } + ] + } + ], + "index": 143 + }, + { + "bbox": [ + 389, + 346, + 497, + 355 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 346, + 497, + 355 + ], + "spans": [ + { + "bbox": [ + 389, + 346, + 497, + 355 + ], + "type": "text", + "content": "Mohammad Ghavamzadeh" + } + ] + } + ], + "index": 144 + }, + { + "bbox": [ + 389, + 357, + 444, + 366 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 357, + 444, + 366 + ], + "spans": [ + { + "bbox": [ + 389, + 357, + 444, + 366 + ], + "type": "text", + "content": "Mohit Bansal" + } + ] + } + ], + "index": 145 + }, + { + "bbox": [ + 389, + 369, + 452, + 377 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 369, + 452, + 377 + ], + "spans": [ + { + "bbox": [ + 389, + 369, + 452, + 377 + ], + "type": "text", + "content": "Morteza Ziyadi" + } + ] + } + ], + "index": 146 + }, + { + "bbox": [ + 389, + 380, + 424, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 380, + 424, + 388 + ], + "spans": [ + { + "bbox": [ + 389, + 380, + 424, + 388 + ], + "type": "text", + "content": "Nan Xia" + } + ] + } + ], + "index": 147 + }, + { + "bbox": [ + 389, + 390, + 448, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 390, + 448, + 399 + ], + "spans": [ + { + "bbox": [ + 389, + 390, + 448, + 399 + ], + "type": "text", + "content": "Nathan Susanj" + } + ] + } + ], + "index": 148 + }, + { + "bbox": [ + 389, + 401, + 436, + 410 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 401, + 436, + 410 + ], + "spans": [ + { + "bbox": [ + 389, + 401, + 436, + 410 + ], + "type": "text", + "content": "Nav Bhasin" + } + ] + } + ], + "index": 149 + }, + { + "bbox": [ + 389, + 412, + 452, + 421 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 412, + 452, + 421 + ], + "spans": [ + { + "bbox": [ + 389, + 412, + 452, + 421 + ], + "type": "text", + "content": "Neha Goswami" + } + ] + } + ], + "index": 150 + }, + { + "bbox": [ + 389, + 422, + 462, + 432 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 422, + 462, + 432 + ], + "spans": [ + { + "bbox": [ + 389, + 422, + 462, + 432 + ], + "type": "text", + "content": "Nehal Belgamwar" + } + ] + } + ], + "index": 151 + }, + { + "bbox": [ + 389, + 434, + 476, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 434, + 476, + 443 + ], + "spans": [ + { + "bbox": [ + 389, + 434, + 476, + 443 + ], + "type": "text", + "content": "Nicolas Anastassacos" + } + ] + } + ], + "index": 152 + }, + 
{ + "bbox": [ + 389, + 445, + 459, + 454 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 445, + 459, + 454 + ], + "spans": [ + { + "bbox": [ + 389, + 445, + 459, + 454 + ], + "type": "text", + "content": "Nicolas Bergeron" + } + ] + } + ], + "index": 153 + }, + { + "bbox": [ + 389, + 456, + 431, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 456, + 431, + 464 + ], + "spans": [ + { + "bbox": [ + 389, + 456, + 431, + 464 + ], + "type": "text", + "content": "Nidhi Jain" + } + ] + } + ], + "index": 154 + }, + { + "bbox": [ + 389, + 467, + 431, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 467, + 431, + 475 + ], + "spans": [ + { + "bbox": [ + 389, + 467, + 431, + 475 + ], + "type": "text", + "content": "Nihal Jain" + } + ] + } + ], + "index": 155 + }, + { + "bbox": [ + 389, + 478, + 476, + 487 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 478, + 476, + 487 + ], + "spans": [ + { + "bbox": [ + 389, + 478, + 476, + 487 + ], + "type": "text", + "content": "Niharika Chopparapu" + } + ] + } + ], + "index": 156 + }, + { + "bbox": [ + 389, + 489, + 419, + 497 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 489, + 419, + 497 + ], + "spans": [ + { + "bbox": [ + 389, + 489, + 419, + 497 + ], + "type": "text", + "content": "Nik Xu" + } + ] + } + ], + "index": 157 + }, + { + "bbox": [ + 389, + 499, + 442, + 508 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 499, + 442, + 508 + ], + "spans": [ + { + "bbox": [ + 389, + 499, + 442, + 508 + ], + "type": "text", + "content": "Nikko Strom" + } + ] + } + ], + "index": 158 + }, + { + "bbox": [ + 389, + 510, + 478, + 519 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 510, + 478, + 519 + ], + "spans": [ + { + "bbox": [ + 389, + 510, + 478, + 519 + ], + "type": "text", + "content": "Nikolaos Malandrakis" + } + ] + } + ], + "index": 159 + }, + { + "bbox": [ + 389, + 521, + 455, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 521, + 455, + 529 + ], + "spans": [ + { + "bbox": [ + 389, + 521, + 455, + 529 + ], + "type": "text", + "content": "Nimisha Mishra" + } + ] + } + ], + "index": 160 + }, + { + "bbox": [ + 389, + 532, + 443, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 532, + 443, + 540 + ], + "spans": [ + { + "bbox": [ + 389, + 532, + 443, + 540 + ], + "type": "text", + "content": "Ninad Parkhi" + } + ] + } + ], + "index": 161 + }, + { + "bbox": [ + 389, + 543, + 459, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 543, + 459, + 552 + ], + "spans": [ + { + "bbox": [ + 389, + 543, + 459, + 552 + ], + "type": "text", + "content": "Ninareh Mehrabi" + } + ] + } + ], + "index": 162 + }, + { + "bbox": [ + 389, + 554, + 440, + 562 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 554, + 440, + 562 + ], + "spans": [ + { + "bbox": [ + 389, + 554, + 440, + 562 + ], + "type": "text", + "content": "Nishita Sant" + } + ] + } + ], + "index": 163 + }, + { + "bbox": [ + 389, + 564, + 448, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 564, + 448, + 574 + ], + "spans": [ + { + "bbox": [ + 389, + 564, + 448, + 574 + ], + "type": "text", + "content": "Nishtha Gupta" + } + ] + } + ], + "index": 164 + }, + { + "bbox": [ + 389, + 575, + 446, + 585 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 575, 
+ 446, + 585 + ], + "spans": [ + { + "bbox": [ + 389, + 575, + 446, + 585 + ], + "type": "text", + "content": "Nitesh Sekhar" + } + ] + } + ], + "index": 165 + }, + { + "bbox": [ + 389, + 586, + 446, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 586, + 446, + 597 + ], + "spans": [ + { + "bbox": [ + 389, + 586, + 446, + 597 + ], + "type": "text", + "content": "Nithin Rajeev" + } + ] + } + ], + "index": 166 + }, + { + "bbox": [ + 389, + 597, + 499, + 607 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 597, + 499, + 607 + ], + "spans": [ + { + "bbox": [ + 389, + 597, + 499, + 607 + ], + "type": "text", + "content": "Nithish Raja Chidambaram" + } + ] + } + ], + "index": 167 + }, + { + "bbox": [ + 389, + 609, + 437, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 609, + 437, + 616 + ], + "spans": [ + { + "bbox": [ + 389, + 609, + 437, + 616 + ], + "type": "text", + "content": "Nitish Dhar" + } + ] + } + ], + "index": 168 + }, + { + "bbox": [ + 389, + 619, + 459, + 629 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 619, + 459, + 629 + ], + "spans": [ + { + "bbox": [ + 389, + 619, + 459, + 629 + ], + "type": "text", + "content": "Noor Bhagwagar" + } + ] + } + ], + "index": 169 + }, + { + "bbox": [ + 389, + 630, + 446, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 630, + 446, + 640 + ], + "spans": [ + { + "bbox": [ + 389, + 630, + 446, + 640 + ], + "type": "text", + "content": "Noy Konforty" + } + ] + } + ], + "index": 170 + }, + { + "bbox": [ + 389, + 641, + 436, + 649 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 641, + 436, + 649 + ], + "spans": [ + { + "bbox": [ + 389, + 641, + 436, + 649 + ], + "type": "text", + "content": "Omar Babu" + } + ] + } + ], + "index": 171 + }, + { + "bbox": [ + 389, + 652, + 443, + 661 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 652, + 443, + 661 + ], + "spans": [ + { + "bbox": [ + 389, + 652, + 443, + 661 + ], + "type": "text", + "content": "Omid Razavi" + } + ] + } + ], + "index": 172 + }, + { + "bbox": [ + 389, + 662, + 462, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 662, + 462, + 673 + ], + "spans": [ + { + "bbox": [ + 389, + 662, + 462, + 673 + ], + "type": "text", + "content": "Orchid Majumder" + } + ] + } + ], + "index": 173 + }, + { + "bbox": [ + 389, + 673, + 435, + 682 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 673, + 435, + 682 + ], + "spans": [ + { + "bbox": [ + 389, + 673, + 435, + 682 + ], + "type": "text", + "content": "Osama Dar" + } + ] + } + ], + "index": 174 + }, + { + "bbox": [ + 389, + 685, + 432, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 685, + 432, + 693 + ], + "spans": [ + { + "bbox": [ + 389, + 685, + 432, + 693 + ], + "type": "text", + "content": "Oscar Hsu" + } + ] + } + ], + "index": 175 + }, + { + "bbox": [ + 389, + 696, + 442, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 696, + 442, + 704 + ], + "spans": [ + { + "bbox": [ + 389, + 696, + 442, + 704 + ], + "type": "text", + "content": "Pablo Kvitca" + } + ] + } + ], + "index": 176 + }, + { + "bbox": [ + 389, + 706, + 449, + 716 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 706, + 449, + 716 + ], + "spans": [ + { + "bbox": [ + 389, + 706, + 449, + 716 + ], + "type": "text", + "content": "Pallavi Pandey" 
+ } + ] + } + ], + "index": 177 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "spans": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "text", + "content": "The Amazon Nova Family of Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 297, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 297, + 741, + 309, + 750 + ], + "type": "text", + "content": "45" + } + ] + } + ], + "index": 178 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 44 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 72, + 144, + 83 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 72, + 144, + 83 + ], + "spans": [ + { + "bbox": [ + 69, + 72, + 144, + 83 + ], + "type": "text", + "content": "Parker Seegmiller" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 70, + 84, + 127, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 84, + 127, + 95 + ], + "spans": [ + { + "bbox": [ + 70, + 84, + 127, + 95 + ], + "type": "text", + "content": "Patrick Lange" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 95, + 122, + 105 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 95, + 122, + 105 + ], + "spans": [ + { + "bbox": [ + 70, + 95, + 122, + 105 + ], + "type": "text", + "content": "Paul Ferraro" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 106, + 132, + 116 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 106, + 132, + 116 + ], + "spans": [ + { + "bbox": [ + 70, + 106, + 132, + 116 + ], + "type": "text", + "content": "Payal Motwani" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 117, + 138, + 127 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 117, + 138, + 127 + ], + "spans": [ + { + "bbox": [ + 70, + 117, + 138, + 127 + ], + "type": "text", + "content": "Pegah Kharazmi" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 128, + 110, + 138 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 128, + 110, + 138 + ], + "spans": [ + { + "bbox": [ + 70, + 128, + 110, + 138 + ], + "type": "text", + "content": "Pei Wang" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 139, + 119, + 148 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 139, + 119, + 148 + ], + "spans": [ + { + "bbox": [ + 70, + 139, + 119, + 148 + ], + "type": "text", + "content": "Pengfei Liu" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 150, + 126, + 159 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 150, + 126, + 159 + ], + "spans": [ + { + "bbox": [ + 70, + 150, + 126, + 159 + ], + "type": "text", + "content": "Peter Bradtke" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 70, + 160, + 114, + 170 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 160, + 114, + 170 + ], + "spans": [ + { + "bbox": [ + 70, + 160, + 114, + 170 + ], + "type": "text", + "content": "Peter Gotoz" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 70, + 171, + 116, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 171, + 116, + 180 + ], + "spans": [ + { + "bbox": [ + 70, + 171, + 116, + 180 + ], + "type": "text", + "content": "Peter Zhou" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 70, + 182, + 124, 
+ 193 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 182, + 124, + 193 + ], + "spans": [ + { + "bbox": [ + 70, + 182, + 124, + 193 + ], + "type": "text", + "content": "Pichao Wang" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 70, + 194, + 124, + 203 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 194, + 124, + 203 + ], + "spans": [ + { + "bbox": [ + 70, + 194, + 124, + 203 + ], + "type": "text", + "content": "Piotr Poskart" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 70, + 204, + 138, + 214 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 204, + 138, + 214 + ], + "spans": [ + { + "bbox": [ + 70, + 204, + 138, + 214 + ], + "type": "text", + "content": "Pooja Sonawane" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 70, + 215, + 145, + 225 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 215, + 145, + 225 + ], + "spans": [ + { + "bbox": [ + 70, + 215, + 145, + 225 + ], + "type": "text", + "content": "Pradeep Natarajan" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 70, + 226, + 151, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 226, + 151, + 236 + ], + "spans": [ + { + "bbox": [ + 70, + 226, + 151, + 236 + ], + "type": "text", + "content": "Pradyun Ramadorai" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 70, + 237, + 123, + 247 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 237, + 123, + 247 + ], + "spans": [ + { + "bbox": [ + 70, + 237, + 123, + 247 + ], + "type": "text", + "content": "Pralam Shah" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 70, + 248, + 134, + 257 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 248, + 134, + 257 + ], + "spans": [ + { + "bbox": [ + 70, + 248, + 134, + 257 + ], + "type": "text", + "content": "Prasad Nirantar" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 70, + 258, + 142, + 268 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 258, + 142, + 268 + ], + "spans": [ + { + "bbox": [ + 70, + 258, + 142, + 268 + ], + "type": "text", + "content": "Prasanthi Chavali" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 70, + 270, + 162, + 280 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 270, + 162, + 280 + ], + "spans": [ + { + "bbox": [ + 70, + 270, + 162, + 280 + ], + "type": "text", + "content": "Prashan Wanigasekara" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 70, + 281, + 130, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 281, + 130, + 289 + ], + "spans": [ + { + "bbox": [ + 70, + 281, + 130, + 289 + ], + "type": "text", + "content": "Prashant Saraf" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 70, + 291, + 123, + 302 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 291, + 123, + 302 + ], + "spans": [ + { + "bbox": [ + 70, + 291, + 123, + 302 + ], + "type": "text", + "content": "Prashun Dey" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 70, + 303, + 127, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 303, + 127, + 312 + ], + "spans": [ + { + "bbox": [ + 70, + 303, + 127, + 312 + ], + "type": "text", + "content": "Pratyush Pant" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 70, + 313, + 132, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 313, + 132, + 323 + ], + "spans": [ + { + "bbox": [ + 70, + 313, + 132, + 
323 + ], + "type": "text", + "content": "Prerak Pradhan" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 70, + 324, + 121, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 324, + 121, + 335 + ], + "spans": [ + { + "bbox": [ + 70, + 324, + 121, + 335 + ], + "type": "text", + "content": "Preyaa Patel" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 70, + 335, + 141, + 345 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 335, + 141, + 345 + ], + "spans": [ + { + "bbox": [ + 70, + 335, + 141, + 345 + ], + "type": "text", + "content": "Priyanka Dadlani" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 70, + 346, + 182, + 355 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 346, + 182, + 355 + ], + "spans": [ + { + "bbox": [ + 70, + 346, + 182, + 355 + ], + "type": "text", + "content": "Prudhvee Narasimha Sadha" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 70, + 357, + 106, + 367 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 357, + 106, + 367 + ], + "spans": [ + { + "bbox": [ + 70, + 357, + 106, + 367 + ], + "type": "text", + "content": "Qi Dong" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 70, + 369, + 106, + 377 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 369, + 106, + 377 + ], + "spans": [ + { + "bbox": [ + 70, + 369, + 106, + 377 + ], + "type": "text", + "content": "Qian Hu" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 70, + 379, + 140, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 379, + 140, + 388 + ], + "spans": [ + { + "bbox": [ + 70, + 379, + 140, + 388 + ], + "type": "text", + "content": "Qiaozi (QZ) Gao" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 70, + 390, + 108, + 400 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 390, + 108, + 400 + ], + "spans": [ + { + "bbox": [ + 70, + 390, + 108, + 400 + ], + "type": "text", + "content": "Qing Liu" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 70, + 401, + 117, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 401, + 117, + 411 + ], + "spans": [ + { + "bbox": [ + 70, + 401, + 117, + 411 + ], + "type": "text", + "content": "Quinn Lam" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 70, + 412, + 113, + 421 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 412, + 113, + 421 + ], + "spans": [ + { + "bbox": [ + 70, + 412, + 113, + 421 + ], + "type": "text", + "content": "Quynh Do" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 70, + 422, + 126, + 431 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 422, + 126, + 431 + ], + "spans": [ + { + "bbox": [ + 70, + 422, + 126, + 431 + ], + "type": "text", + "content": "R. 
Manmatha" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 70, + 433, + 126, + 442 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 433, + 126, + 442 + ], + "spans": [ + { + "bbox": [ + 70, + 433, + 126, + 442 + ], + "type": "text", + "content": "Rachel Willis" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 70, + 445, + 114, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 445, + 114, + 453 + ], + "spans": [ + { + "bbox": [ + 70, + 445, + 114, + 453 + ], + "type": "text", + "content": "Rafael Liu" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 70, + 455, + 118, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 455, + 118, + 464 + ], + "spans": [ + { + "bbox": [ + 70, + 455, + 118, + 464 + ], + "type": "text", + "content": "Rafal Ellert" + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 70, + 466, + 129, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 466, + 129, + 475 + ], + "spans": [ + { + "bbox": [ + 70, + 466, + 129, + 475 + ], + "type": "text", + "content": "Rafal Kalinski" + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 70, + 477, + 133, + 487 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 477, + 133, + 487 + ], + "spans": [ + { + "bbox": [ + 70, + 477, + 133, + 487 + ], + "type": "text", + "content": "Rafi Al Attrach" + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 70, + 488, + 126, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 488, + 126, + 498 + ], + "spans": [ + { + "bbox": [ + 70, + 488, + 126, + 498 + ], + "type": "text", + "content": "Ragha Prasad" + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 70, + 498, + 127, + 509 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 498, + 127, + 509 + ], + "spans": [ + { + "bbox": [ + 70, + 498, + 127, + 509 + ], + "type": "text", + "content": "Ragini Prasad" + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 70, + 510, + 135, + 520 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 510, + 135, + 520 + ], + "spans": [ + { + "bbox": [ + 70, + 510, + 135, + 520 + ], + "type": "text", + "content": "Raguvir Kunani" + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 70, + 521, + 123, + 531 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 521, + 123, + 531 + ], + "spans": [ + { + "bbox": [ + 70, + 521, + 123, + 531 + ], + "type": "text", + "content": "Rahul Gupta" + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 70, + 533, + 128, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 533, + 128, + 540 + ], + "spans": [ + { + "bbox": [ + 70, + 533, + 128, + 540 + ], + "type": "text", + "content": "Rahul Sharma" + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 70, + 542, + 125, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 542, + 125, + 552 + ], + "spans": [ + { + "bbox": [ + 70, + 542, + 125, + 552 + ], + "type": "text", + "content": "Rahul Tewari" + } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 70, + 553, + 157, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 553, + 157, + 563 + ], + "spans": [ + { + "bbox": [ + 70, + 553, + 157, + 563 + ], + "type": "text", + "content": "Rajaganesh Baskaran" + } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 70, + 564, + 121, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 564, + 
121, + 574 + ], + "spans": [ + { + "bbox": [ + 70, + 564, + 121, + 574 + ], + "type": "text", + "content": "Rajan Singh" + } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 70, + 575, + 120, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 575, + 120, + 586 + ], + "spans": [ + { + "bbox": [ + 70, + 575, + 120, + 586 + ], + "type": "text", + "content": "Rajiv Gupta" + } + ] + } + ], + "index": 47 + }, + { + "bbox": [ + 70, + 586, + 121, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 586, + 121, + 597 + ], + "spans": [ + { + "bbox": [ + 70, + 586, + 121, + 597 + ], + "type": "text", + "content": "Rajiv Reddy" + } + ] + } + ], + "index": 48 + }, + { + "bbox": [ + 70, + 597, + 134, + 607 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 597, + 134, + 607 + ], + "spans": [ + { + "bbox": [ + 70, + 597, + 134, + 607 + ], + "type": "text", + "content": "Rajshekhar Das" + } + ] + } + ], + "index": 49 + }, + { + "bbox": [ + 70, + 608, + 129, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 608, + 129, + 617 + ], + "spans": [ + { + "bbox": [ + 70, + 608, + 129, + 617 + ], + "type": "text", + "content": "Rakesh Chada" + } + ] + } + ], + "index": 50 + }, + { + "bbox": [ + 70, + 619, + 192, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 619, + 192, + 628 + ], + "spans": [ + { + "bbox": [ + 70, + 619, + 192, + 628 + ], + "type": "text", + "content": "Rakesh Vaideeswaran Mahesh" + } + ] + } + ], + "index": 51 + }, + { + "bbox": [ + 70, + 629, + 157, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 629, + 157, + 639 + ], + "spans": [ + { + "bbox": [ + 70, + 629, + 157, + 639 + ], + "type": "text", + "content": "Ram Chandrasekaran" + } + ] + } + ], + "index": 52 + }, + { + "bbox": [ + 70, + 641, + 143, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 641, + 143, + 651 + ], + "spans": [ + { + "bbox": [ + 70, + 641, + 143, + 651 + ], + "type": "text", + "content": "Ramesh Nallapati" + } + ] + } + ], + "index": 53 + }, + { + "bbox": [ + 70, + 652, + 107, + 661 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 652, + 107, + 661 + ], + "spans": [ + { + "bbox": [ + 70, + 652, + 107, + 661 + ], + "type": "text", + "content": "Ran Xue" + } + ] + } + ], + "index": 54 + }, + { + "bbox": [ + 70, + 662, + 165, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 662, + 165, + 673 + ], + "spans": [ + { + "bbox": [ + 70, + 662, + 165, + 673 + ], + "type": "text", + "content": "Rashmi Gangadharaiah" + } + ] + } + ], + "index": 55 + }, + { + "bbox": [ + 70, + 673, + 142, + 683 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 673, + 142, + 683 + ], + "spans": [ + { + "bbox": [ + 70, + 673, + 142, + 683 + ], + "type": "text", + "content": "Ravi Rachakonda" + } + ] + } + ], + "index": 56 + }, + { + "bbox": [ + 70, + 685, + 133, + 695 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 685, + 133, + 695 + ], + "spans": [ + { + "bbox": [ + 70, + 685, + 133, + 695 + ], + "type": "text", + "content": "Renxian Zhang" + } + ] + } + ], + "index": 57 + }, + { + "bbox": [ + 70, + 696, + 143, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 696, + 143, + 704 + ], + "spans": [ + { + "bbox": [ + 70, + 696, + 143, + 704 + ], + "type": "text", + "content": "Rexhina Blloshmi" + } + ] + } + ], + 
"index": 58 + }, + { + "bbox": [ + 70, + 706, + 141, + 716 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 706, + 141, + 716 + ], + "spans": [ + { + "bbox": [ + 70, + 706, + 141, + 716 + ], + "type": "text", + "content": "Rishabh Agrawal" + } + ] + } + ], + "index": 59 + }, + { + "bbox": [ + 230, + 72, + 289, + 83 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 72, + 289, + 83 + ], + "spans": [ + { + "bbox": [ + 230, + 72, + 289, + 83 + ], + "type": "text", + "content": "Robert Enyedi" + } + ] + } + ], + "index": 60 + }, + { + "bbox": [ + 230, + 84, + 283, + 94 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 84, + 283, + 94 + ], + "spans": [ + { + "bbox": [ + 230, + 84, + 283, + 94 + ], + "type": "text", + "content": "Robert Lowe" + } + ] + } + ], + "index": 61 + }, + { + "bbox": [ + 230, + 95, + 292, + 104 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 95, + 292, + 104 + ], + "spans": [ + { + "bbox": [ + 230, + 95, + 292, + 104 + ], + "type": "text", + "content": "Robik Shrestha" + } + ] + } + ], + "index": 62 + }, + { + "bbox": [ + 230, + 106, + 313, + 115 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 106, + 313, + 115 + ], + "spans": [ + { + "bbox": [ + 230, + 106, + 313, + 115 + ], + "type": "text", + "content": "Robinson Piramuthu" + } + ] + } + ], + "index": 63 + }, + { + "bbox": [ + 230, + 117, + 280, + 126 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 117, + 280, + 126 + ], + "spans": [ + { + "bbox": [ + 230, + 117, + 280, + 126 + ], + "type": "text", + "content": "Rohail Asad" + } + ] + } + ], + "index": 64 + }, + { + "bbox": [ + 230, + 128, + 291, + 137 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 128, + 291, + 137 + ], + "spans": [ + { + "bbox": [ + 230, + 128, + 291, + 137 + ], + "type": "text", + "content": "Rohan Khanna" + } + ] + } + ], + "index": 65 + }, + { + "bbox": [ + 230, + 139, + 302, + 149 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 139, + 302, + 149 + ], + "spans": [ + { + "bbox": [ + 230, + 139, + 302, + 149 + ], + "type": "text", + "content": "Rohan Mukherjee" + } + ] + } + ], + "index": 66 + }, + { + "bbox": [ + 230, + 150, + 280, + 159 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 150, + 280, + 159 + ], + "spans": [ + { + "bbox": [ + 230, + 150, + 280, + 159 + ], + "type": "text", + "content": "Rohit Mittal" + } + ] + } + ], + "index": 67 + }, + { + "bbox": [ + 230, + 160, + 282, + 170 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 160, + 282, + 170 + ], + "spans": [ + { + "bbox": [ + 230, + 160, + 282, + 170 + ], + "type": "text", + "content": "Rohit Prasad" + } + ] + } + ], + "index": 68 + }, + { + "bbox": [ + 230, + 171, + 350, + 182 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 171, + 350, + 182 + ], + "spans": [ + { + "bbox": [ + 230, + 171, + 350, + 182 + ], + "type": "text", + "content": "Rohith Mysore Vijaya Kumar" + } + ] + } + ], + "index": 69 + }, + { + "bbox": [ + 230, + 182, + 285, + 192 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 182, + 285, + 192 + ], + "spans": [ + { + "bbox": [ + 230, + 182, + 285, + 192 + ], + "type": "text", + "content": "Ron Diamant" + } + ] + } + ], + "index": 70 + }, + { + "bbox": [ + 230, + 194, + 289, + 203 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 
194, + 289, + 203 + ], + "spans": [ + { + "bbox": [ + 230, + 194, + 289, + 203 + ], + "type": "text", + "content": "Ruchita Gupta" + } + ] + } + ], + "index": 71 + }, + { + "bbox": [ + 230, + 204, + 273, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 204, + 273, + 213 + ], + "spans": [ + { + "bbox": [ + 230, + 204, + 273, + 213 + ], + "type": "text", + "content": "Ruiwen Li" + } + ] + } + ], + "index": 72 + }, + { + "bbox": [ + 230, + 215, + 276, + 225 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 215, + 276, + 225 + ], + "spans": [ + { + "bbox": [ + 230, + 215, + 276, + 225 + ], + "type": "text", + "content": "Ruoying Li" + } + ] + } + ], + "index": 73 + }, + { + "bbox": [ + 230, + 226, + 297, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 226, + 297, + 236 + ], + "spans": [ + { + "bbox": [ + 230, + 226, + 297, + 236 + ], + "type": "text", + "content": "Rushabh Fegade" + } + ] + } + ], + "index": 74 + }, + { + "bbox": [ + 230, + 237, + 280, + 247 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 237, + 280, + 247 + ], + "spans": [ + { + "bbox": [ + 230, + 237, + 280, + 247 + ], + "type": "text", + "content": "Ruxu Zhang" + } + ] + } + ], + "index": 75 + }, + { + "bbox": [ + 230, + 248, + 282, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 248, + 282, + 258 + ], + "spans": [ + { + "bbox": [ + 230, + 248, + 282, + 258 + ], + "type": "text", + "content": "Ryan Arbow" + } + ] + } + ], + "index": 76 + }, + { + "bbox": [ + 230, + 259, + 276, + 268 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 259, + 276, + 268 + ], + "spans": [ + { + "bbox": [ + 230, + 259, + 276, + 268 + ], + "type": "text", + "content": "Ryan Chen" + } + ] + } + ], + "index": 77 + }, + { + "bbox": [ + 230, + 270, + 289, + 279 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 270, + 289, + 279 + ], + "spans": [ + { + "bbox": [ + 230, + 270, + 289, + 279 + ], + "type": "text", + "content": "Ryan Gabbard" + } + ] + } + ], + "index": 78 + }, + { + "bbox": [ + 230, + 281, + 282, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 281, + 282, + 289 + ], + "spans": [ + { + "bbox": [ + 230, + 281, + 282, + 289 + ], + "type": "text", + "content": "Ryan Hoium" + } + ] + } + ], + "index": 79 + }, + { + "bbox": [ + 230, + 292, + 274, + 301 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 292, + 274, + 301 + ], + "spans": [ + { + "bbox": [ + 230, + 292, + 274, + 301 + ], + "type": "text", + "content": "Ryan King" + } + ] + } + ], + "index": 80 + }, + { + "bbox": [ + 230, + 303, + 309, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 303, + 309, + 312 + ], + "spans": [ + { + "bbox": [ + 230, + 303, + 309, + 312 + ], + "type": "text", + "content": "Sabarishkumar Iyer" + } + ] + } + ], + "index": 81 + }, + { + "bbox": [ + 230, + 313, + 288, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 313, + 288, + 323 + ], + "spans": [ + { + "bbox": [ + 230, + 313, + 288, + 323 + ], + "type": "text", + "content": "Sachal Malick" + } + ] + } + ], + "index": 82 + }, + { + "bbox": [ + 230, + 324, + 299, + 334 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 324, + 299, + 334 + ], + "spans": [ + { + "bbox": [ + 230, + 324, + 299, + 334 + ], + "type": "text", + "content": "Sahar Movaghati" + } + ] + } + ],
"index": 83 + }, + { + "bbox": [ + 230, + 335, + 282, + 344 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 335, + 282, + 344 + ], + "spans": [ + { + "bbox": [ + 230, + 335, + 282, + 344 + ], + "type": "text", + "content": "Sai Balakavi" + } + ] + } + ], + "index": 84 + }, + { + "bbox": [ + 230, + 346, + 269, + 355 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 346, + 269, + 355 + ], + "spans": [ + { + "bbox": [ + 230, + 346, + 269, + 355 + ], + "type": "text", + "content": "Sai Jakka" + } + ] + } + ], + "index": 85 + }, + { + "bbox": [ + 230, + 357, + 319, + 367 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 357, + 319, + 367 + ], + "spans": [ + { + "bbox": [ + 230, + 357, + 319, + 367 + ], + "type": "text", + "content": "Sai Kashyap Paruvelli" + } + ] + } + ], + "index": 86 + }, + { + "bbox": [ + 230, + 368, + 326, + 378 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 368, + 326, + 378 + ], + "spans": [ + { + "bbox": [ + 230, + 368, + 326, + 378 + ], + "type": "text", + "content": "Sai Muralidhar Jayanthi" + } + ] + } + ], + "index": 87 + }, + { + "bbox": [ + 230, + 379, + 350, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 379, + 350, + 388 + ], + "spans": [ + { + "bbox": [ + 230, + 379, + 350, + 388 + ], + "type": "text", + "content": "Saicharan Shriram Mujumdar" + } + ] + } + ], + "index": 88 + }, + { + "bbox": [ + 230, + 390, + 298, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 390, + 298, + 399 + ], + "spans": [ + { + "bbox": [ + 230, + 390, + 298, + 399 + ], + "type": "text", + "content": "Sainyam Kapoor" + } + ] + } + ], + "index": 89 + }, + { + "bbox": [ + 230, + 401, + 282, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 401, + 282, + 411 + ], + "spans": [ + { + "bbox": [ + 230, + 401, + 282, + 411 + ], + "type": "text", + "content": "Sajjad Beygi" + } + ] + } + ], + "index": 90 + }, + { + "bbox": [ + 230, + 412, + 295, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 412, + 295, + 422 + ], + "spans": [ + { + "bbox": [ + 230, + 412, + 295, + 422 + ], + "type": "text", + "content": "Saket Dingliwal" + } + ] + } + ], + "index": 91 + }, + { + "bbox": [ + 230, + 423, + 281, + 431 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 423, + 281, + 431 + ], + "spans": [ + { + "bbox": [ + 230, + 423, + 281, + 431 + ], + "type": "text", + "content": "Saleh Soltan" + } + ] + } + ], + "index": 92 + }, + { + "bbox": [ + 230, + 434, + 280, + 442 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 434, + 280, + 442 + ], + "spans": [ + { + "bbox": [ + 230, + 434, + 280, + 442 + ], + "type": "text", + "content": "Sam Ricklin" + } + ] + } + ], + "index": 93 + }, + { + "bbox": [ + 230, + 445, + 279, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 445, + 279, + 453 + ], + "spans": [ + { + "bbox": [ + 230, + 445, + 279, + 453 + ], + "type": "text", + "content": "Sam Tucker" + } + ] + } + ], + "index": 94 + }, + { + "bbox": [ + 230, + 455, + 286, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 455, + 286, + 464 + ], + "spans": [ + { + "bbox": [ + 230, + 455, + 286, + 464 + ], + "type": "text", + "content": "Sameer Sinha" + } + ] + } + ], + "index": 95 + }, + { + "bbox": [ + 230, + 466, + 314, + 476 + ], + "type": "text", + "angle": 0, + "lines": [ + { 
+ "bbox": [ + 230, + 466, + 314, + 476 + ], + "spans": [ + { + "bbox": [ + 230, + 466, + 314, + 476 + ], + "type": "text", + "content": "Samridhi Choudhary" + } + ] + } + ], + "index": 96 + }, + { + "bbox": [ + 230, + 478, + 280, + 486 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 478, + 280, + 486 + ], + "spans": [ + { + "bbox": [ + 230, + 478, + 280, + 486 + ], + "type": "text", + "content": "Samson Tan" + } + ] + } + ], + "index": 97 + }, + { + "bbox": [ + 230, + 488, + 302, + 497 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 488, + 302, + 497 + ], + "spans": [ + { + "bbox": [ + 230, + 488, + 302, + 497 + ], + "type": "text", + "content": "Samuel Broscheit" + } + ] + } + ], + "index": 98 + }, + { + "bbox": [ + 230, + 498, + 297, + 508 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 498, + 297, + 508 + ], + "spans": [ + { + "bbox": [ + 230, + 498, + 297, + 508 + ], + "type": "text", + "content": "Samuel Schulter" + } + ] + } + ], + "index": 99 + }, + { + "bbox": [ + 230, + 510, + 298, + 520 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 510, + 298, + 520 + ], + "spans": [ + { + "bbox": [ + 230, + 510, + 298, + 520 + ], + "type": "text", + "content": "Sanchit Agarwal" + } + ] + } + ], + "index": 100 + }, + { + "bbox": [ + 230, + 521, + 290, + 531 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 521, + 290, + 531 + ], + "spans": [ + { + "bbox": [ + 230, + 521, + 290, + 531 + ], + "type": "text", + "content": "Sandeep Atluri" + } + ] + } + ], + "index": 101 + }, + { + "bbox": [ + 230, + 533, + 289, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 533, + 289, + 540 + ], + "spans": [ + { + "bbox": [ + 230, + 533, + 289, + 540 + ], + "type": "text", + "content": "Sander Valstar" + } + ] + } + ], + "index": 102 + }, + { + "bbox": [ + 230, + 543, + 298, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 543, + 298, + 552 + ], + "spans": [ + { + "bbox": [ + 230, + 543, + 298, + 552 + ], + "type": "text", + "content": "Sanjana Shankar" + } + ] + } + ], + "index": 103 + }, + { + "bbox": [ + 230, + 554, + 307, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 554, + 307, + 563 + ], + "spans": [ + { + "bbox": [ + 230, + 554, + 307, + 563 + ], + "type": "text", + "content": "Sanyukta Sanyukta" + } + ] + } + ], + "index": 104 + }, + { + "bbox": [ + 230, + 564, + 295, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 564, + 295, + 574 + ], + "spans": [ + { + "bbox": [ + 230, + 564, + 295, + 574 + ], + "type": "text", + "content": "Sarthak Khanna" + } + ] + } + ], + "index": 105 + }, + { + "bbox": [ + 230, + 575, + 311, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 575, + 311, + 586 + ], + "spans": [ + { + "bbox": [ + 230, + 575, + 311, + 586 + ], + "type": "text", + "content": "Sarvpriye Khetrapal" + } + ] + } + ], + "index": 106 + }, + { + "bbox": [ + 230, + 586, + 308, + 596 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 586, + 308, + 596 + ], + "spans": [ + { + "bbox": [ + 230, + 586, + 308, + 596 + ], + "type": "text", + "content": "Satish Janakiraman" + } + ] + } + ], + "index": 107 + }, + { + "bbox": [ + 230, + 597, + 282, + 606 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 597, + 282, + 606 + ], + "spans": [ + { + "bbox": [ + 230, + 597, + 282, + 
606 + ], + "type": "text", + "content": "Saumil Shah" + } + ] + } + ], + "index": 108 + }, + { + "bbox": [ + 230, + 608, + 299, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 608, + 299, + 617 + ], + "spans": [ + { + "bbox": [ + 230, + 608, + 299, + 617 + ], + "type": "text", + "content": "Saurabh Akolkar" + } + ] + } + ], + "index": 109 + }, + { + "bbox": [ + 230, + 619, + 282, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 619, + 282, + 628 + ], + "spans": [ + { + "bbox": [ + 230, + 619, + 282, + 628 + ], + "type": "text", + "content": "Saurabh Giri" + } + ] + } + ], + "index": 110 + }, + { + "bbox": [ + 230, + 629, + 314, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 629, + 314, + 639 + ], + "spans": [ + { + "bbox": [ + 230, + 629, + 314, + 639 + ], + "type": "text", + "content": "Saurabh Khandelwal" + } + ] + } + ], + "index": 111 + }, + { + "bbox": [ + 230, + 641, + 291, + 650 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 641, + 291, + 650 + ], + "spans": [ + { + "bbox": [ + 230, + 641, + 291, + 650 + ], + "type": "text", + "content": "Saurabh Pawar" + } + ] + } + ], + "index": 112 + }, + { + "bbox": [ + 230, + 652, + 286, + 661 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 652, + 286, + 661 + ], + "spans": [ + { + "bbox": [ + 230, + 652, + 286, + 661 + ], + "type": "text", + "content": "Saurabh Sahu" + } + ] + } + ], + "index": 113 + }, + { + "bbox": [ + 230, + 662, + 279, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 662, + 279, + 673 + ], + "spans": [ + { + "bbox": [ + 230, + 662, + 279, + 673 + ], + "type": "text", + "content": "Sean Huang" + } + ] + } + ], + "index": 114 + }, + { + "bbox": [ + 230, + 673, + 267, + 683 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 673, + 267, + 683 + ], + "spans": [ + { + "bbox": [ + 230, + 673, + 267, + 683 + ], + "type": "text", + "content": "Sejun Ra" + } + ] + } + ], + "index": 115 + }, + { + "bbox": [ + 230, + 685, + 311, + 695 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 685, + 311, + 695 + ], + "spans": [ + { + "bbox": [ + 230, + 685, + 311, + 695 + ], + "type": "text", + "content": "Senthilkumar Gopal" + } + ] + } + ], + "index": 116 + }, + { + "bbox": [ + 230, + 696, + 314, + 705 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 696, + 314, + 705 + ], + "spans": [ + { + "bbox": [ + 230, + 696, + 314, + 705 + ], + "type": "text", + "content": "Sergei Dobroshinsky" + } + ] + } + ], + "index": 117 + }, + { + "bbox": [ + 230, + 706, + 276, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 706, + 276, + 715 + ], + "spans": [ + { + "bbox": [ + 230, + 706, + 276, + 715 + ], + "type": "text", + "content": "Shadi Saba" + } + ] + } + ], + "index": 118 + }, + { + "bbox": [ + 389, + 72, + 440, + 83 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 72, + 440, + 83 + ], + "spans": [ + { + "bbox": [ + 389, + 72, + 440, + 83 + ], + "type": "text", + "content": "Shamik Roy" + } + ] + } + ], + "index": 119 + }, + { + "bbox": [ + 389, + 84, + 434, + 93 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 84, + 434, + 93 + ], + "spans": [ + { + "bbox": [ + 389, + 84, + 434, + 93 + ], + "type": "text", + "content": "Shamit Lal" + } + ] + } + ], + "index": 120 + }, + { + "bbox": [ + 389, + 95, + 493, + 104 + 
], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 95, + 493, + 104 + ], + "spans": [ + { + "bbox": [ + 389, + 95, + 493, + 104 + ], + "type": "text", + "content": "Shankar Ananthakrishnan" + } + ] + } + ], + "index": 121 + }, + { + "bbox": [ + 389, + 106, + 430, + 115 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 106, + 430, + 115 + ], + "spans": [ + { + "bbox": [ + 389, + 106, + 430, + 115 + ], + "type": "text", + "content": "Sharon Li" + } + ] + } + ], + "index": 122 + }, + { + "bbox": [ + 389, + 117, + 454, + 127 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 117, + 454, + 127 + ], + "spans": [ + { + "bbox": [ + 389, + 117, + 454, + 127 + ], + "type": "text", + "content": "Shashwat Srijan" + } + ] + } + ], + "index": 123 + }, + { + "bbox": [ + 389, + 128, + 449, + 137 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 128, + 449, + 137 + ], + "spans": [ + { + "bbox": [ + 389, + 128, + 449, + 137 + ], + "type": "text", + "content": "Shekhar Bhide" + } + ] + } + ], + "index": 124 + }, + { + "bbox": [ + 389, + 139, + 461, + 149 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 139, + 461, + 149 + ], + "spans": [ + { + "bbox": [ + 389, + 139, + 461, + 149 + ], + "type": "text", + "content": "Sheng Long Tang" + } + ] + } + ], + "index": 125 + }, + { + "bbox": [ + 389, + 150, + 433, + 159 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 150, + 433, + 159 + ], + "spans": [ + { + "bbox": [ + 389, + 150, + 433, + 159 + ], + "type": "text", + "content": "Sheng Zha" + } + ] + } + ], + "index": 126 + }, + { + "bbox": [ + 389, + 161, + 449, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 161, + 449, + 171 + ], + "spans": [ + { + "bbox": [ + 389, + 161, + 449, + 171 + ], + "type": "text", + "content": "Sheree Oraby" + } + ] + } + ], + "index": 127 + }, + { + "bbox": [ + 389, + 171, + 449, + 181 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 171, + 449, + 181 + ], + "spans": [ + { + "bbox": [ + 389, + 171, + 449, + 181 + ], + "type": "text", + "content": "Sherif Mostafa" + } + ] + } + ], + "index": 128 + }, + { + "bbox": [ + 389, + 182, + 423, + 192 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 182, + 423, + 192 + ], + "spans": [ + { + "bbox": [ + 389, + 182, + 423, + 192 + ], + "type": "text", + "content": "Shiqi Li" + } + ] + } + ], + "index": 129 + }, + { + "bbox": [ + 389, + 194, + 455, + 202 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 194, + 455, + 202 + ], + "spans": [ + { + "bbox": [ + 389, + 194, + 455, + 202 + ], + "type": "text", + "content": "Shishir Bharathi" + } + ] + } + ], + "index": 130 + }, + { + "bbox": [ + 389, + 204, + 454, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 204, + 454, + 213 + ], + "spans": [ + { + "bbox": [ + 389, + 204, + 454, + 213 + ], + "type": "text", + "content": "ShivamPrakash" + } + ] + } + ], + "index": 131 + }, + { + "bbox": [ + 389, + 215, + 452, + 225 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 215, + 452, + 225 + ], + "spans": [ + { + "bbox": [ + 389, + 215, + 452, + 225 + ], + "type": "text", + "content": "Shiyuan Huang" + } + ] + } + ], + "index": 132 + }, + { + "bbox": [ + 389, + 226, + 466, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 226, + 466, + 236 + ], + "spans": [ + { + 
"bbox": [ + 389, + 226, + 466, + 236 + ], + "type": "text", + "content": "Shreya Yembarwar" + } + ] + } + ], + "index": 133 + }, + { + "bbox": [ + 389, + 237, + 455, + 247 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 237, + 455, + 247 + ], + "spans": [ + { + "bbox": [ + 389, + 237, + 455, + 247 + ], + "type": "text", + "content": "Shreyas Pansare" + } + ] + } + ], + "index": 134 + }, + { + "bbox": [ + 389, + 248, + 476, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 248, + 476, + 258 + ], + "spans": [ + { + "bbox": [ + 389, + 248, + 476, + 258 + ], + "type": "text", + "content": "Shreyas Subramanian" + } + ] + } + ], + "index": 135 + }, + { + "bbox": [ + 389, + 259, + 444, + 269 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 259, + 444, + 269 + ], + "spans": [ + { + "bbox": [ + 389, + 259, + 444, + 269 + ], + "type": "text", + "content": "Shrijeet Joshi" + } + ] + } + ], + "index": 136 + }, + { + "bbox": [ + 389, + 270, + 429, + 279 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 270, + 429, + 279 + ], + "spans": [ + { + "bbox": [ + 389, + 270, + 429, + 279 + ], + "type": "text", + "content": "Shuai Liu" + } + ] + } + ], + "index": 137 + }, + { + "bbox": [ + 389, + 281, + 435, + 290 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 281, + 435, + 290 + ], + "spans": [ + { + "bbox": [ + 389, + 281, + 435, + 290 + ], + "type": "text", + "content": "Shuai Tang" + } + ] + } + ], + "index": 138 + }, + { + "bbox": [ + 389, + 292, + 466, + 301 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 292, + 466, + 301 + ], + "spans": [ + { + "bbox": [ + 389, + 292, + 466, + 301 + ], + "type": "text", + "content": "Shubham Chandak" + } + ] + } + ], + "index": 139 + }, + { + "bbox": [ + 389, + 303, + 450, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 303, + 450, + 312 + ], + "spans": [ + { + "bbox": [ + 389, + 303, + 450, + 312 + ], + "type": "text", + "content": "Shubham Garg" + } + ] + } + ], + "index": 140 + }, + { + "bbox": [ + 389, + 313, + 460, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 313, + 460, + 323 + ], + "spans": [ + { + "bbox": [ + 389, + 313, + 460, + 323 + ], + "type": "text", + "content": "Shubham Katiyar" + } + ] + } + ], + "index": 141 + }, + { + "bbox": [ + 389, + 324, + 456, + 334 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 324, + 456, + 334 + ], + "spans": [ + { + "bbox": [ + 389, + 324, + 456, + 334 + ], + "type": "text", + "content": "Shubham Mehta" + } + ] + } + ], + "index": 142 + }, + { + "bbox": [ + 389, + 335, + 466, + 345 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 335, + 466, + 345 + ], + "spans": [ + { + "bbox": [ + 389, + 335, + 466, + 345 + ], + "type": "text", + "content": "Shubham Srivastav" + } + ] + } + ], + "index": 143 + }, + { + "bbox": [ + 389, + 346, + 434, + 356 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 346, + 434, + 356 + ], + "spans": [ + { + "bbox": [ + 389, + 346, + 434, + 356 + ], + "type": "text", + "content": "Shuo Yang" + } + ] + } + ], + "index": 144 + }, + { + "bbox": [ + 389, + 357, + 464, + 367 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 357, + 464, + 367 + ], + "spans": [ + { + "bbox": [ + 389, + 357, + 464, + 367 + ], + "type": "text", + "content": "Siddalingesha D S" + } + ] + } + ], + 
"index": 145 + }, + { + "bbox": [ + 389, + 369, + 476, + 378 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 369, + 476, + 378 + ], + "spans": [ + { + "bbox": [ + 389, + 369, + 476, + 378 + ], + "type": "text", + "content": "Siddharth Choudhary" + } + ] + } + ], + "index": 146 + }, + { + "bbox": [ + 389, + 379, + 485, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 379, + 485, + 388 + ], + "spans": [ + { + "bbox": [ + 389, + 379, + 485, + 388 + ], + "type": "text", + "content": "Siddharth Singh Senger" + } + ] + } + ], + "index": 147 + }, + { + "bbox": [ + 389, + 390, + 440, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 390, + 440, + 399 + ], + "spans": [ + { + "bbox": [ + 389, + 390, + 440, + 399 + ], + "type": "text", + "content": "Simon Babb" + } + ] + } + ], + "index": 148 + }, + { + "bbox": [ + 389, + 401, + 439, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 401, + 439, + 411 + ], + "spans": [ + { + "bbox": [ + 389, + 401, + 439, + 411 + ], + "type": "text", + "content": "Sina Moeini" + } + ] + } + ], + "index": 149 + }, + { + "bbox": [ + 389, + 412, + 430, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 412, + 430, + 422 + ], + "spans": [ + { + "bbox": [ + 389, + 412, + 430, + 422 + ], + "type": "text", + "content": "Siqi Deng" + } + ] + } + ], + "index": 150 + }, + { + "bbox": [ + 389, + 423, + 457, + 432 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 423, + 457, + 432 + ], + "spans": [ + { + "bbox": [ + 389, + 423, + 457, + 432 + ], + "type": "text", + "content": "Siva Loganathan" + } + ] + } + ], + "index": 151 + }, + { + "bbox": [ + 389, + 434, + 472, + 444 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 434, + 472, + 444 + ], + "spans": [ + { + "bbox": [ + 389, + 434, + 472, + 444 + ], + "type": "text", + "content": "Slawomir Domagala" + } + ] + } + ], + "index": 152 + }, + { + "bbox": [ + 389, + 445, + 444, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 445, + 444, + 453 + ], + "spans": [ + { + "bbox": [ + 389, + 445, + 444, + 453 + ], + "type": "text", + "content": "Sneha Narkar" + } + ] + } + ], + "index": 153 + }, + { + "bbox": [ + 389, + 455, + 452, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 455, + 452, + 464 + ], + "spans": [ + { + "bbox": [ + 389, + 455, + 452, + 464 + ], + "type": "text", + "content": "Sneha Wadhwa" + } + ] + } + ], + "index": 154 + }, + { + "bbox": [ + 389, + 467, + 458, + 477 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 467, + 458, + 477 + ], + "spans": [ + { + "bbox": [ + 389, + 467, + 458, + 477 + ], + "type": "text", + "content": "Songyang Zhang" + } + ] + } + ], + "index": 155 + }, + { + "bbox": [ + 389, + 478, + 449, + 487 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 478, + 449, + 487 + ], + "spans": [ + { + "bbox": [ + 389, + 478, + 449, + 487 + ], + "type": "text", + "content": "Songyao Jiang" + } + ] + } + ], + "index": 156 + }, + { + "bbox": [ + 389, + 488, + 446, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 488, + 446, + 498 + ], + "spans": [ + { + "bbox": [ + 389, + 488, + 446, + 498 + ], + "type": "text", + "content": "Sony Trenous" + } + ] + } + ], + "index": 157 + }, + { + "bbox": [ + 389, + 499, + 465, + 509 + ], + "type": "text", + "angle": 0, + "lines": [ + 
{ + "bbox": [ + 389, + 499, + 465, + 509 + ], + "spans": [ + { + "bbox": [ + 389, + 499, + 465, + 509 + ], + "type": "text", + "content": "Soumajyoti Sarkar" + } + ] + } + ], + "index": 158 + }, + { + "bbox": [ + 389, + 510, + 444, + 520 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 510, + 444, + 520 + ], + "spans": [ + { + "bbox": [ + 389, + 510, + 444, + 520 + ], + "type": "text", + "content": "Soumya Saha" + } + ] + } + ], + "index": 159 + }, + { + "bbox": [ + 389, + 521, + 452, + 531 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 521, + 452, + 531 + ], + "spans": [ + { + "bbox": [ + 389, + 521, + 452, + 531 + ], + "type": "text", + "content": "Sourabh Reddy" + } + ] + } + ], + "index": 160 + }, + { + "bbox": [ + 389, + 532, + 455, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 532, + 455, + 540 + ], + "spans": [ + { + "bbox": [ + 389, + 532, + 455, + 540 + ], + "type": "text", + "content": "Sourav Dokania" + } + ] + } + ], + "index": 161 + }, + { + "bbox": [ + 389, + 543, + 482, + 553 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 543, + 482, + 553 + ], + "spans": [ + { + "bbox": [ + 389, + 543, + 482, + 553 + ], + "type": "text", + "content": "Spurthideepika Sandiri" + } + ] + } + ], + "index": 162 + }, + { + "bbox": [ + 389, + 554, + 463, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 554, + 463, + 563 + ], + "spans": [ + { + "bbox": [ + 389, + 554, + 463, + 563 + ], + "type": "text", + "content": "Spyros Matsoukas" + } + ] + } + ], + "index": 163 + }, + { + "bbox": [ + 389, + 564, + 456, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 564, + 456, + 574 + ], + "spans": [ + { + "bbox": [ + 389, + 564, + 456, + 574 + ], + "type": "text", + "content": "Sravan Bodapati" + } + ] + } + ], + "index": 164 + }, + { + "bbox": [ + 389, + 575, + 490, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 575, + 490, + 586 + ], + "spans": [ + { + "bbox": [ + 389, + 575, + 490, + 586 + ], + "type": "text", + "content": "Sri Harsha Reddy Wdaru" + } + ] + } + ], + "index": 165 + }, + { + "bbox": [ + 389, + 586, + 509, + 596 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 586, + 509, + 596 + ], + "spans": [ + { + "bbox": [ + 389, + 586, + 509, + 596 + ], + "type": "text", + "content": "Sridevi Yagati Venkateshdatta" + } + ] + } + ], + "index": 166 + }, + { + "bbox": [ + 389, + 597, + 460, + 606 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 597, + 460, + 606 + ], + "spans": [ + { + "bbox": [ + 389, + 597, + 460, + 606 + ], + "type": "text", + "content": "Srikanth Ronanki" + } + ] + } + ], + "index": 167 + }, + { + "bbox": [ + 389, + 608, + 498, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 608, + 498, + 617 + ], + "spans": [ + { + "bbox": [ + 389, + 608, + 498, + 617 + ], + "type": "text", + "content": "Srinivasan R Veeravanallur" + } + ] + } + ], + "index": 168 + }, + { + "bbox": [ + 389, + 619, + 474, + 629 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 619, + 474, + 629 + ], + "spans": [ + { + "bbox": [ + 389, + 619, + 474, + 629 + ], + "type": "text", + "content": "Sriram Venkatapathy" + } + ] + } + ], + "index": 169 + }, + { + "bbox": [ + 389, + 630, + 498, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 630, + 498, + 640 + ], + "spans": [ 
+ { + "bbox": [ + 389, + 630, + 498, + 640 + ], + "type": "text", + "content": "Sriramprabhu Sankaraguru" + } + ] + } + ], + "index": 170 + }, + { + "bbox": [ + 389, + 641, + 452, + 650 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 641, + 452, + 650 + ], + "spans": [ + { + "bbox": [ + 389, + 641, + 452, + 650 + ], + "type": "text", + "content": "Sruthi Gorantla" + } + ] + } + ], + "index": 171 + }, + { + "bbox": [ + 389, + 652, + 450, + 661 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 652, + 450, + 661 + ], + "spans": [ + { + "bbox": [ + 389, + 652, + 450, + 661 + ], + "type": "text", + "content": "Sruthi Karuturi" + } + ] + } + ], + "index": 172 + }, + { + "bbox": [ + 389, + 662, + 454, + 671 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 662, + 454, + 671 + ], + "spans": [ + { + "bbox": [ + 389, + 662, + 454, + 671 + ], + "type": "text", + "content": "Stefan Schroedl" + } + ] + } + ], + "index": 173 + }, + { + "bbox": [ + 389, + 673, + 464, + 684 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 673, + 464, + 684 + ], + "spans": [ + { + "bbox": [ + 389, + 673, + 464, + 684 + ], + "type": "text", + "content": "Subendhu Rongali" + } + ] + } + ], + "index": 174 + }, + { + "bbox": [ + 389, + 685, + 455, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 685, + 455, + 693 + ], + "spans": [ + { + "bbox": [ + 389, + 685, + 455, + 693 + ], + "type": "text", + "content": "Subbasis Kundu" + } + ] + } + ], + "index": 175 + }, + { + "bbox": [ + 389, + 696, + 455, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 696, + 455, + 704 + ], + "spans": [ + { + "bbox": [ + 389, + 696, + 455, + 704 + ], + "type": "text", + "content": "Suhaila Shakiah" + } + ] + } + ], + "index": 176 + }, + { + "bbox": [ + 389, + 706, + 446, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 706, + 446, + 715 + ], + "spans": [ + { + "bbox": [ + 389, + 706, + 446, + 715 + ], + "type": "text", + "content": "Sukriti Tiwari" + } + ] + } + ], + "index": 177 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "spans": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "text", + "content": "The Amazon Nova Family of Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 297, + 742, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 742, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 297, + 742, + 309, + 750 + ], + "type": "text", + "content": "46" + } + ] + } + ], + "index": 178 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 45 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 72, + 124, + 83 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 72, + 124, + 83 + ], + "spans": [ + { + "bbox": [ + 69, + 72, + 124, + 83 + ], + "type": "text", + "content": "Sumit Bharti" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 70, + 84, + 124, + 94 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 84, + 124, + 94 + ], + "spans": [ + { + "bbox": [ + 70, + 84, + 124, + 94 + ], + "type": "text", + "content": "Sumita Sami" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 95, + 135, + 105 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 95, + 135, + 105 + ], + "spans": [ + { + 
"bbox": [ + 70, + 95, + 135, + 105 + ], + "type": "text", + "content": "Sumith Mathew" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 106, + 111, + 116 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 106, + 111, + 116 + ], + "spans": [ + { + "bbox": [ + 70, + 106, + 111, + 116 + ], + "type": "text", + "content": "Sunny Yu" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 117, + 125, + 126 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 117, + 125, + 126 + ], + "spans": [ + { + "bbox": [ + 70, + 117, + 125, + 126 + ], + "type": "text", + "content": "Sunwoo Kim" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 128, + 158, + 138 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 128, + 158, + 138 + ], + "spans": [ + { + "bbox": [ + 70, + 128, + 158, + 138 + ], + "type": "text", + "content": "Suraj Bajirao Malode" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 139, + 162, + 149 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 139, + 162, + 149 + ], + "spans": [ + { + "bbox": [ + 70, + 139, + 162, + 149 + ], + "type": "text", + "content": "Susana Cumplido Riel" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 149, + 129, + 159 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 149, + 129, + 159 + ], + "spans": [ + { + "bbox": [ + 70, + 149, + 129, + 159 + ], + "type": "text", + "content": "Swapnil Palod" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 70, + 160, + 122, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 160, + 122, + 171 + ], + "spans": [ + { + "bbox": [ + 70, + 160, + 122, + 171 + ], + "type": "text", + "content": "Swastik Roy" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 70, + 171, + 127, + 182 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 171, + 127, + 182 + ], + "spans": [ + { + "bbox": [ + 70, + 171, + 127, + 182 + ], + "type": "text", + "content": "Syed Furqhan" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 70, + 182, + 140, + 193 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 182, + 140, + 193 + ], + "spans": [ + { + "bbox": [ + 70, + 182, + 140, + 193 + ], + "type": "text", + "content": "Tagyoung Chung" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 70, + 194, + 143, + 203 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 194, + 143, + 203 + ], + "spans": [ + { + "bbox": [ + 70, + 194, + 143, + 203 + ], + "type": "text", + "content": "Takuma Yoshitani" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 70, + 204, + 138, + 215 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 204, + 138, + 215 + ], + "spans": [ + { + "bbox": [ + 70, + 204, + 138, + 215 + ], + "type": "text", + "content": "Taojiannan Yang" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 70, + 216, + 146, + 225 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 216, + 146, + 225 + ], + "spans": [ + { + "bbox": [ + 70, + 216, + 146, + 225 + ], + "type": "text", + "content": "Tejaswi Chillakura" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 70, + 226, + 130, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 226, + 130, + 236 + ], + "spans": [ + { + "bbox": [ + 70, + 226, + 130, + 236 + ], + "type": "text", + "content": "Tejwant Bajwa" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 70, + 237, + 134, + 
247 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 237, + 134, + 247 + ], + "spans": [ + { + "bbox": [ + 70, + 237, + 134, + 247 + ], + "type": "text", + "content": "Temi Lajumoke" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 70, + 248, + 117, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 248, + 117, + 258 + ], + "spans": [ + { + "bbox": [ + 70, + 248, + 117, + 258 + ], + "type": "text", + "content": "Thanh Tran" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 70, + 259, + 140, + 268 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 259, + 140, + 268 + ], + "spans": [ + { + "bbox": [ + 70, + 259, + 140, + 268 + ], + "type": "text", + "content": "Thomas Gueudre" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 70, + 270, + 125, + 280 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 270, + 125, + 280 + ], + "spans": [ + { + "bbox": [ + 70, + 270, + 125, + 280 + ], + "type": "text", + "content": "Thomas Jung" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 70, + 281, + 113, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 281, + 113, + 289 + ], + "spans": [ + { + "bbox": [ + 70, + 281, + 113, + 289 + ], + "type": "text", + "content": "Tianhui Li" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 70, + 292, + 129, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 292, + 129, + 300 + ], + "spans": [ + { + "bbox": [ + 70, + 292, + 129, + 300 + ], + "type": "text", + "content": "Tim Seemman" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 70, + 302, + 131, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 302, + 131, + 312 + ], + "spans": [ + { + "bbox": [ + 70, + 302, + 131, + 312 + ], + "type": "text", + "content": "Timothy Leffel" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 70, + 313, + 132, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 313, + 132, + 323 + ], + "spans": [ + { + "bbox": [ + 70, + 313, + 132, + 323 + ], + "type": "text", + "content": "Tingting Xiang" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 70, + 324, + 113, + 334 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 324, + 113, + 334 + ], + "spans": [ + { + "bbox": [ + 70, + 324, + 113, + 334 + ], + "type": "text", + "content": "Tirth Patel" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 70, + 335, + 134, + 344 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 335, + 134, + 344 + ], + "spans": [ + { + "bbox": [ + 70, + 335, + 134, + 344 + ], + "type": "text", + "content": "Tobias Domhan" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 70, + 346, + 122, + 355 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 346, + 122, + 355 + ], + "spans": [ + { + "bbox": [ + 70, + 346, + 122, + 355 + ], + "type": "text", + "content": "Tobias Falke" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 70, + 357, + 111, + 367 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 357, + 111, + 367 + ], + "spans": [ + { + "bbox": [ + 70, + 357, + 111, + 367 + ], + "type": "text", + "content": "Toby Guo" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 70, + 369, + 101, + 377 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 369, + 101, + 377 + ], + "spans": [ + { + "bbox": [ + 70, + 369, + 101, + 377 + ], + "type": "text", + 
"content": "Tom Li" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 70, + 379, + 155, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 379, + 155, + 388 + ], + "spans": [ + { + "bbox": [ + 70, + 379, + 155, + 388 + ], + "type": "text", + "content": "Tomasz Horsczaruk" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 70, + 389, + 137, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 389, + 137, + 399 + ], + "spans": [ + { + "bbox": [ + 70, + 389, + 137, + 399 + ], + "type": "text", + "content": "Tomasz Jedynak" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 70, + 400, + 136, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 400, + 136, + 411 + ], + "spans": [ + { + "bbox": [ + 70, + 400, + 136, + 411 + ], + "type": "text", + "content": "Tushar Kulkarni" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 70, + 411, + 115, + 421 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 411, + 115, + 421 + ], + "spans": [ + { + "bbox": [ + 70, + 411, + 115, + 421 + ], + "type": "text", + "content": "Tyst Marin" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 70, + 422, + 132, + 433 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 422, + 132, + 433 + ], + "spans": [ + { + "bbox": [ + 70, + 422, + 132, + 433 + ], + "type": "text", + "content": "Tytus Metrycki" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 70, + 434, + 131, + 444 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 434, + 131, + 444 + ], + "spans": [ + { + "bbox": [ + 70, + 434, + 131, + 444 + ], + "type": "text", + "content": "Tzu-Yen Wang" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 70, + 445, + 119, + 455 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 445, + 119, + 455 + ], + "spans": [ + { + "bbox": [ + 70, + 445, + 119, + 455 + ], + "type": "text", + "content": "Umang Jain" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 70, + 456, + 131, + 465 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 456, + 131, + 465 + ], + "spans": [ + { + "bbox": [ + 70, + 456, + 131, + 465 + ], + "type": "text", + "content": "Upendra Singh" + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 70, + 466, + 141, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 466, + 141, + 475 + ], + "spans": [ + { + "bbox": [ + 70, + 466, + 141, + 475 + ], + "type": "text", + "content": "Utkarsh Chirimar" + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 70, + 476, + 131, + 487 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 476, + 131, + 487 + ], + "spans": [ + { + "bbox": [ + 70, + 476, + 131, + 487 + ], + "type": "text", + "content": "Vaibhav Gupta" + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 70, + 488, + 123, + 497 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 488, + 123, + 497 + ], + "spans": [ + { + "bbox": [ + 70, + 488, + 123, + 497 + ], + "type": "text", + "content": "Vanshil Shah" + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 70, + 498, + 141, + 509 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 498, + 141, + 509 + ], + "spans": [ + { + "bbox": [ + 70, + 498, + 141, + 509 + ], + "type": "text", + "content": "Varad Deshpande" + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 70, + 510, + 124, + 520 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 
70, + 510, + 124, + 520 + ], + "spans": [ + { + "bbox": [ + 70, + 510, + 124, + 520 + ], + "type": "text", + "content": "Varad Gunjal" + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 70, + 521, + 144, + 530 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 521, + 144, + 530 + ], + "spans": [ + { + "bbox": [ + 70, + 521, + 144, + 530 + ], + "type": "text", + "content": "Varsha Srikeshava" + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 70, + 531, + 124, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 531, + 124, + 540 + ], + "spans": [ + { + "bbox": [ + 70, + 531, + 124, + 540 + ], + "type": "text", + "content": "Varsha Vivek" + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 70, + 542, + 140, + 553 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 542, + 140, + 553 + ], + "spans": [ + { + "bbox": [ + 70, + 542, + 140, + 553 + ], + "type": "text", + "content": "Varun Bharadwaj" + } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 70, + 554, + 126, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 554, + 126, + 563 + ], + "spans": [ + { + "bbox": [ + 70, + 554, + 126, + 563 + ], + "type": "text", + "content": "Varun Gangal" + } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 70, + 564, + 125, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 564, + 125, + 574 + ], + "spans": [ + { + "bbox": [ + 70, + 564, + 125, + 574 + ], + "type": "text", + "content": "Varun Kumar" + } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 70, + 575, + 143, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 575, + 143, + 586 + ], + "spans": [ + { + "bbox": [ + 70, + 575, + 143, + 586 + ], + "type": "text", + "content": "Venkatesh Elango" + } + ] + } + ], + "index": 47 + }, + { + "bbox": [ + 70, + 586, + 138, + 595 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 586, + 138, + 595 + ], + "spans": [ + { + "bbox": [ + 70, + 586, + 138, + 595 + ], + "type": "text", + "content": "Vicente Ordonez" + } + ] + } + ], + "index": 48 + }, + { + "bbox": [ + 70, + 597, + 117, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 597, + 117, + 605 + ], + "spans": [ + { + "bbox": [ + 70, + 597, + 117, + 605 + ], + "type": "text", + "content": "Victor Soto" + } + ] + } + ], + "index": 49 + }, + { + "bbox": [ + 229, + 72, + 326, + 83 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 229, + 72, + 326, + 83 + ], + "spans": [ + { + "bbox": [ + 229, + 72, + 326, + 83 + ], + "type": "text", + "content": "Vignesh Radhakrishnan" + } + ] + } + ], + "index": 50 + }, + { + "bbox": [ + 229, + 84, + 282, + 94 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 229, + 84, + 282, + 94 + ], + "spans": [ + { + "bbox": [ + 229, + 84, + 282, + 94 + ], + "type": "text", + "content": "Vihang Patel" + } + ] + } + ], + "index": 51 + }, + { + "bbox": [ + 229, + 95, + 286, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 229, + 95, + 286, + 106 + ], + "spans": [ + { + "bbox": [ + 229, + 95, + 286, + 106 + ], + "type": "text", + "content": "Vikram Singh" + } + ] + } + ], + "index": 52 + }, + { + "bbox": [ + 229, + 106, + 333, + 116 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 229, + 106, + 333, + 116 + ], + "spans": [ + { + "bbox": [ + 229, + 106, + 333, + 116 + ], + "type": "text", + "content": "Vinay Varma Kolanuvada" + } + ] + } + ], + 
"index": 53 + }, + { + "bbox": [ + 229, + 117, + 359, + 127 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 229, + 117, + 359, + 127 + ], + "spans": [ + { + "bbox": [ + 229, + 117, + 359, + 127 + ], + "type": "text", + "content": "Vinayshekhar Bannihatti Kumar" + } + ] + } + ], + "index": 54 + }, + { + "bbox": [ + 229, + 128, + 294, + 138 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 229, + 128, + 294, + 138 + ], + "spans": [ + { + "bbox": [ + 229, + 128, + 294, + 138 + ], + "type": "text", + "content": "Vincent Auvray" + } + ] + } + ], + "index": 55 + }, + { + "bbox": [ + 229, + 139, + 301, + 148 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 229, + 139, + 301, + 148 + ], + "spans": [ + { + "bbox": [ + 229, + 139, + 301, + 148 + ], + "type": "text", + "content": "Vincent Cartillier" + } + ] + } + ], + "index": 56 + }, + { + "bbox": [ + 229, + 149, + 290, + 159 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 229, + 149, + 290, + 159 + ], + "spans": [ + { + "bbox": [ + 229, + 149, + 290, + 159 + ], + "type": "text", + "content": "Vincent Ponzo" + } + ] + } + ], + "index": 57 + }, + { + "bbox": [ + 229, + 160, + 277, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 229, + 160, + 277, + 171 + ], + "spans": [ + { + "bbox": [ + 229, + 160, + 277, + 171 + ], + "type": "text", + "content": "Violet Peng" + } + ] + } + ], + "index": 58 + }, + { + "bbox": [ + 229, + 171, + 307, + 181 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 229, + 171, + 307, + 181 + ], + "spans": [ + { + "bbox": [ + 229, + 171, + 307, + 181 + ], + "type": "text", + "content": "Vishal Khandelwal" + } + ] + } + ], + "index": 59 + }, + { + "bbox": [ + 229, + 182, + 278, + 191 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 229, + 182, + 278, + 191 + ], + "spans": [ + { + "bbox": [ + 229, + 182, + 278, + 191 + ], + "type": "text", + "content": "Vishal Naik" + } + ] + } + ], + "index": 60 + }, + { + "bbox": [ + 229, + 193, + 324, + 202 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 229, + 193, + 324, + 202 + ], + "spans": [ + { + "bbox": [ + 229, + 193, + 324, + 202 + ], + "type": "text", + "content": "Vishvesh Sahasrabudhe" + } + ] + } + ], + "index": 61 + }, + { + "bbox": [ + 229, + 204, + 292, + 214 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 229, + 204, + 292, + 214 + ], + "spans": [ + { + "bbox": [ + 229, + 204, + 292, + 214 + ], + "type": "text", + "content": "Vitaliy Korolev" + } + ] + } + ], + "index": 62 + }, + { + "bbox": [ + 229, + 215, + 299, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 229, + 215, + 299, + 224 + ], + "spans": [ + { + "bbox": [ + 229, + 215, + 299, + 224 + ], + "type": "text", + "content": "Vivek Gokuladas" + } + ] + } + ], + "index": 63 + }, + { + "bbox": [ + 229, + 226, + 284, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 229, + 226, + 284, + 236 + ], + "spans": [ + { + "bbox": [ + 229, + 226, + 284, + 236 + ], + "type": "text", + "content": "Vivek Madan" + } + ] + } + ], + "index": 64 + }, + { + "bbox": [ + 229, + 237, + 309, + 246 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 229, + 237, + 309, + 246 + ], + "spans": [ + { + "bbox": [ + 229, + 237, + 309, + 246 + ], + "type": "text", + "content": "Vivek Subramanian" + } + ] + } + ], + "index": 65 + }, + { + "bbox": [ + 229, + 247, + 290, + 258 + ], + "type": "text", + "angle": 0, + 
"lines": [ + { + "bbox": [ + 229, + 247, + 290, + 258 + ], + "spans": [ + { + "bbox": [ + 229, + 247, + 290, + 258 + ], + "type": "text", + "content": "Volkan Cevher" + } + ] + } + ], + "index": 66 + }, + { + "bbox": [ + 229, + 258, + 285, + 269 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 229, + 258, + 285, + 269 + ], + "spans": [ + { + "bbox": [ + 229, + 258, + 285, + 269 + ], + "type": "text", + "content": "Vrinda Gupta" + } + ] + } + ], + "index": 67 + }, + { + "bbox": [ + 229, + 270, + 282, + 279 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 229, + 270, + 282, + 279 + ], + "spans": [ + { + "bbox": [ + 229, + 270, + 282, + 279 + ], + "type": "text", + "content": "Wael Hamza" + } + ] + } + ], + "index": 68 + }, + { + "bbox": [ + 229, + 281, + 274, + 290 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 229, + 281, + 274, + 290 + ], + "spans": [ + { + "bbox": [ + 229, + 281, + 274, + 290 + ], + "type": "text", + "content": "Wei Zhang" + } + ] + } + ], + "index": 69 + }, + { + "bbox": [ + 229, + 292, + 288, + 301 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 229, + 292, + 288, + 301 + ], + "spans": [ + { + "bbox": [ + 229, + 292, + 288, + 301 + ], + "type": "text", + "content": "Weitong Ruan" + } + ] + } + ], + "index": 70 + }, + { + "bbox": [ + 229, + 302, + 290, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 229, + 302, + 290, + 312 + ], + "spans": [ + { + "bbox": [ + 229, + 302, + 290, + 312 + ], + "type": "text", + "content": "Weiwei Cheng" + } + ] + } + ], + "index": 71 + }, + { + "bbox": [ + 229, + 313, + 277, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 229, + 313, + 277, + 323 + ], + "spans": [ + { + "bbox": [ + 229, + 313, + 277, + 323 + ], + "type": "text", + "content": "Wen Zhang" + } + ] + } + ], + "index": 72 + }, + { + "bbox": [ + 229, + 324, + 281, + 334 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 229, + 324, + 281, + 334 + ], + "spans": [ + { + "bbox": [ + 229, + 324, + 281, + 334 + ], + "type": "text", + "content": "Wenbo Zhao" + } + ] + } + ], + "index": 73 + }, + { + "bbox": [ + 229, + 335, + 281, + 345 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 229, + 335, + 281, + 345 + ], + "spans": [ + { + "bbox": [ + 229, + 335, + 281, + 345 + ], + "type": "text", + "content": "Wenyan Yao" + } + ] + } + ], + "index": 74 + }, + { + "bbox": [ + 229, + 346, + 302, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 229, + 346, + 302, + 357 + ], + "spans": [ + { + "bbox": [ + 229, + 346, + 302, + 357 + ], + "type": "text", + "content": "Wenzhuo Ouyang" + } + ] + } + ], + "index": 75 + }, + { + "bbox": [ + 229, + 357, + 295, + 367 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 229, + 357, + 295, + 367 + ], + "spans": [ + { + "bbox": [ + 229, + 357, + 295, + 367 + ], + "type": "text", + "content": "Wesley Dashner" + } + ] + } + ], + "index": 76 + }, + { + "bbox": [ + 229, + 368, + 304, + 378 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 229, + 368, + 304, + 378 + ], + "spans": [ + { + "bbox": [ + 229, + 368, + 304, + 378 + ], + "type": "text", + "content": "William Campbell" + } + ] + } + ], + "index": 77 + }, + { + "bbox": [ + 229, + 379, + 279, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 229, + 379, + 279, + 388 + ], + "spans": [ + { + "bbox": [ + 229, + 379, + 279, + 388 + ], + "type": "text", + 
"content": "William Lin" + } + ] + } + ], + "index": 78 + }, + { + "bbox": [ + 229, + 389, + 290, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 229, + 389, + 290, + 399 + ], + "spans": [ + { + "bbox": [ + 229, + 389, + 290, + 399 + ], + "type": "text", + "content": "Willian Martin" + } + ] + } + ], + "index": 79 + }, + { + "bbox": [ + 229, + 400, + 287, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 229, + 400, + 287, + 411 + ], + "spans": [ + { + "bbox": [ + 229, + 400, + 287, + 411 + ], + "type": "text", + "content": "Wyatt Pearson" + } + ] + } + ], + "index": 80 + }, + { + "bbox": [ + 229, + 411, + 279, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 229, + 411, + 279, + 422 + ], + "spans": [ + { + "bbox": [ + 229, + 411, + 279, + 422 + ], + "type": "text", + "content": "Xiang Jiang" + } + ] + } + ], + "index": 81 + }, + { + "bbox": [ + 229, + 422, + 287, + 433 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 229, + 422, + 287, + 433 + ], + "spans": [ + { + "bbox": [ + 229, + 422, + 287, + 433 + ], + "type": "text", + "content": "Xiangxing Lu" + } + ] + } + ], + "index": 82 + }, + { + "bbox": [ + 229, + 434, + 290, + 444 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 229, + 434, + 290, + 444 + ], + "spans": [ + { + "bbox": [ + 229, + 434, + 290, + 444 + ], + "type": "text", + "content": "Xiangyang Shi" + } + ] + } + ], + "index": 83 + }, + { + "bbox": [ + 229, + 445, + 288, + 455 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 229, + 445, + 288, + 455 + ], + "spans": [ + { + "bbox": [ + 229, + 445, + 288, + 455 + ], + "type": "text", + "content": "Xianwen Peng" + } + ] + } + ], + "index": 84 + }, + { + "bbox": [ + 229, + 456, + 287, + 465 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 229, + 456, + 287, + 465 + ], + "spans": [ + { + "bbox": [ + 229, + 456, + 287, + 465 + ], + "type": "text", + "content": "Xiaofeng Gao" + } + ] + } + ], + "index": 85 + }, + { + "bbox": [ + 229, + 466, + 283, + 476 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 229, + 466, + 283, + 476 + ], + "spans": [ + { + "bbox": [ + 229, + 466, + 283, + 476 + ], + "type": "text", + "content": "Xiaoge Jiang" + } + ] + } + ], + "index": 86 + }, + { + "bbox": [ + 229, + 477, + 280, + 487 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 229, + 477, + 280, + 487 + ], + "spans": [ + { + "bbox": [ + 229, + 477, + 280, + 487 + ], + "type": "text", + "content": "Xiaohan Fei" + } + ] + } + ], + "index": 87 + }, + { + "bbox": [ + 229, + 488, + 288, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 229, + 488, + 288, + 498 + ], + "spans": [ + { + "bbox": [ + 229, + 488, + 288, + 498 + ], + "type": "text", + "content": "Xiaohui Wang" + } + ] + } + ], + "index": 88 + }, + { + "bbox": [ + 229, + 498, + 313, + 509 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 229, + 498, + 313, + 509 + ], + "spans": [ + { + "bbox": [ + 229, + 498, + 313, + 509 + ], + "type": "text", + "content": "Xiaozhou Joey Zhou" + } + ] + } + ], + "index": 89 + }, + { + "bbox": [ + 229, + 510, + 268, + 520 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 229, + 510, + 268, + 520 + ], + "spans": [ + { + "bbox": [ + 229, + 510, + 268, + 520 + ], + "type": "text", + "content": "Xin Feng" + } + ] + } + ], + "index": 90 + }, + { + "bbox": [ + 229, + 521, + 282, + 531 + ], + "type": "text", + "angle": 0, + 
"lines": [ + { + "bbox": [ + 229, + 521, + 282, + 531 + ], + "spans": [ + { + "bbox": [ + 229, + 521, + 282, + 531 + ], + "type": "text", + "content": "Xinyan Zhao" + } + ] + } + ], + "index": 91 + }, + { + "bbox": [ + 229, + 532, + 285, + 542 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 229, + 532, + 285, + 542 + ], + "spans": [ + { + "bbox": [ + 229, + 532, + 285, + 542 + ], + "type": "text", + "content": "Xinyao Wang" + } + ] + } + ], + "index": 92 + }, + { + "bbox": [ + 229, + 543, + 266, + 553 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 229, + 543, + 266, + 553 + ], + "spans": [ + { + "bbox": [ + 229, + 543, + 266, + 553 + ], + "type": "text", + "content": "Xinyu Li" + } + ] + } + ], + "index": 93 + }, + { + "bbox": [ + 229, + 554, + 271, + 564 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 229, + 554, + 271, + 564 + ], + "spans": [ + { + "bbox": [ + 229, + 554, + 271, + 564 + ], + "type": "text", + "content": "Xu Zhang" + } + ] + } + ], + "index": 94 + }, + { + "bbox": [ + 229, + 565, + 277, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 229, + 565, + 277, + 574 + ], + "spans": [ + { + "bbox": [ + 229, + 565, + 277, + 574 + ], + "type": "text", + "content": "Xuan Wang" + } + ] + } + ], + "index": 95 + }, + { + "bbox": [ + 229, + 575, + 273, + 585 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 229, + 575, + 273, + 585 + ], + "spans": [ + { + "bbox": [ + 229, + 575, + 273, + 585 + ], + "type": "text", + "content": "Xuandi Fu" + } + ] + } + ], + "index": 96 + }, + { + "bbox": [ + 229, + 586, + 286, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 229, + 586, + 286, + 597 + ], + "spans": [ + { + "bbox": [ + 229, + 586, + 286, + 597 + ], + "type": "text", + "content": "Xueling Yuan" + } + ] + } + ], + "index": 97 + }, + { + "bbox": [ + 229, + 597, + 286, + 608 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 229, + 597, + 286, + 608 + ], + "spans": [ + { + "bbox": [ + 229, + 597, + 286, + 608 + ], + "type": "text", + "content": "Xuning Wang" + } + ] + } + ], + "index": 98 + }, + { + "bbox": [ + 388, + 72, + 462, + 83 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 388, + 72, + 462, + 83 + ], + "spans": [ + { + "bbox": [ + 388, + 72, + 462, + 83 + ], + "type": "text", + "content": "Yadunandana Rao" + } + ] + } + ], + "index": 99 + }, + { + "bbox": [ + 388, + 84, + 441, + 94 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 388, + 84, + 441, + 94 + ], + "spans": [ + { + "bbox": [ + 388, + 84, + 441, + 94 + ], + "type": "text", + "content": "Yair Tavizon" + } + ] + } + ], + "index": 100 + }, + { + "bbox": [ + 388, + 95, + 451, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 388, + 95, + 451, + 106 + ], + "spans": [ + { + "bbox": [ + 388, + 95, + 451, + 106 + ], + "type": "text", + "content": "Yan Rossiytsev" + } + ] + } + ], + "index": 101 + }, + { + "bbox": [ + 388, + 106, + 441, + 115 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 388, + 106, + 441, + 115 + ], + "spans": [ + { + "bbox": [ + 388, + 106, + 441, + 115 + ], + "type": "text", + "content": "Yanbei Chen" + } + ] + } + ], + "index": 102 + }, + { + "bbox": [ + 388, + 117, + 427, + 127 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 388, + 117, + 427, + 127 + ], + "spans": [ + { + "bbox": [ + 388, + 117, + 427, + 127 + ], + "type": "text", + "content": "Yang Liu" + } + ] + 
} + ], + "index": 103 + }, + { + "bbox": [ + 388, + 128, + 429, + 138 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 388, + 128, + 429, + 138 + ], + "spans": [ + { + "bbox": [ + 388, + 128, + 429, + 138 + ], + "type": "text", + "content": "Yang Zou" + } + ] + } + ], + "index": 104 + }, + { + "bbox": [ + 388, + 139, + 449, + 148 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 388, + 139, + 449, + 148 + ], + "spans": [ + { + "bbox": [ + 388, + 139, + 449, + 148 + ], + "type": "text", + "content": "Yangsook Park" + } + ] + } + ], + "index": 105 + }, + { + "bbox": [ + 388, + 149, + 455, + 160 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 388, + 149, + 455, + 160 + ], + "spans": [ + { + "bbox": [ + 388, + 149, + 455, + 160 + ], + "type": "text", + "content": "Yannick Versley" + } + ] + } + ], + "index": 106 + }, + { + "bbox": [ + 388, + 161, + 447, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 388, + 161, + 447, + 171 + ], + "spans": [ + { + "bbox": [ + 388, + 161, + 447, + 171 + ], + "type": "text", + "content": "Yanyan Zhang" + } + ] + } + ], + "index": 107 + }, + { + "bbox": [ + 388, + 171, + 432, + 181 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 388, + 171, + 432, + 181 + ], + "spans": [ + { + "bbox": [ + 388, + 171, + 432, + 181 + ], + "type": "text", + "content": "Yash Patel" + } + ] + } + ], + "index": 108 + }, + { + "bbox": [ + 388, + 182, + 448, + 193 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 388, + 182, + 448, + 193 + ], + "spans": [ + { + "bbox": [ + 388, + 182, + 448, + 193 + ], + "type": "text", + "content": "Yen-Cheng Lu" + } + ] + } + ], + "index": 109 + }, + { + "bbox": [ + 388, + 194, + 416, + 202 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 388, + 194, + 416, + 202 + ], + "spans": [ + { + "bbox": [ + 388, + 194, + 416, + 202 + ], + "type": "text", + "content": "Yi Pan" + } + ] + } + ], + "index": 110 + }, + { + "bbox": [ + 388, + 204, + 474, + 214 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 388, + 204, + 474, + 214 + ], + "spans": [ + { + "bbox": [ + 388, + 204, + 474, + 214 + ], + "type": "text", + "content": "Yi-Hsiang (Sean) Lai" + } + ] + } + ], + "index": 111 + }, + { + "bbox": [ + 388, + 215, + 433, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 388, + 215, + 433, + 224 + ], + "spans": [ + { + "bbox": [ + 388, + 215, + 433, + 224 + ], + "type": "text", + "content": "Yichen Hu" + } + ] + } + ], + "index": 112 + }, + { + "bbox": [ + 388, + 226, + 434, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 388, + 226, + 434, + 236 + ], + "spans": [ + { + "bbox": [ + 388, + 226, + 434, + 236 + ], + "type": "text", + "content": "Yida Wang" + } + ] + } + ], + "index": 113 + }, + { + "bbox": [ + 388, + 237, + 442, + 247 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 388, + 237, + 442, + 247 + ], + "spans": [ + { + "bbox": [ + 388, + 237, + 442, + 247 + ], + "type": "text", + "content": "Yiheng Zhou" + } + ] + } + ], + "index": 114 + }, + { + "bbox": [ + 388, + 248, + 436, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 388, + 248, + 436, + 258 + ], + "spans": [ + { + "bbox": [ + 388, + 248, + 436, + 258 + ], + "type": "text", + "content": "Yilin Xiang" + } + ] + } + ], + "index": 115 + }, + { + "bbox": [ + 388, + 259, + 425, + 269 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 388, 
+ 259, + 425, + 269 + ], + "spans": [ + { + "bbox": [ + 388, + 259, + 425, + 269 + ], + "type": "text", + "content": "Ying Shi" + } + ] + } + ], + "index": 116 + }, + { + "bbox": [ + 388, + 270, + 434, + 280 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 388, + 270, + 434, + 280 + ], + "spans": [ + { + "bbox": [ + 388, + 270, + 434, + 280 + ], + "type": "text", + "content": "Ying Wang" + } + ] + } + ], + "index": 117 + }, + { + "bbox": [ + 388, + 281, + 451, + 290 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 388, + 281, + 451, + 290 + ], + "spans": [ + { + "bbox": [ + 388, + 281, + 451, + 290 + ], + "type": "text", + "content": "Yishai Galatzer" + } + ] + } + ], + "index": 118 + }, + { + "bbox": [ + 388, + 292, + 449, + 302 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 388, + 292, + 449, + 302 + ], + "spans": [ + { + "bbox": [ + 388, + 292, + 449, + 302 + ], + "type": "text", + "content": "Yongxin Wang" + } + ] + } + ], + "index": 119 + }, + { + "bbox": [ + 388, + 303, + 438, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 388, + 303, + 438, + 312 + ], + "spans": [ + { + "bbox": [ + 388, + 303, + 438, + 312 + ], + "type": "text", + "content": "Yorick Shen" + } + ] + } + ], + "index": 120 + }, + { + "bbox": [ + 388, + 313, + 437, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 388, + 313, + 437, + 323 + ], + "spans": [ + { + "bbox": [ + 388, + 313, + 437, + 323 + ], + "type": "text", + "content": "Yuchen Sun" + } + ] + } + ], + "index": 121 + }, + { + "bbox": [ + 388, + 324, + 456, + 334 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 388, + 324, + 456, + 334 + ], + "spans": [ + { + "bbox": [ + 388, + 324, + 456, + 334 + ], + "type": "text", + "content": "Yudi Purwatama" + } + ] + } + ], + "index": 122 + }, + { + "bbox": [ + 388, + 335, + 447, + 345 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 388, + 335, + 447, + 345 + ], + "spans": [ + { + "bbox": [ + 388, + 335, + 447, + 345 + ], + "type": "text", + "content": "Yue (Rex) Wu" + } + ] + } + ], + "index": 123 + }, + { + "bbox": [ + 388, + 346, + 419, + 355 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 388, + 346, + 419, + 355 + ], + "spans": [ + { + "bbox": [ + 388, + 346, + 419, + 355 + ], + "type": "text", + "content": "Yue Gu" + } + ] + } + ], + "index": 124 + }, + { + "bbox": [ + 388, + 357, + 450, + 368 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 388, + 357, + 450, + 368 + ], + "spans": [ + { + "bbox": [ + 388, + 357, + 450, + 368 + ], + "type": "text", + "content": "Yuechun Wang" + } + ] + } + ], + "index": 125 + }, + { + "bbox": [ + 388, + 369, + 436, + 379 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 388, + 369, + 436, + 379 + ], + "spans": [ + { + "bbox": [ + 388, + 369, + 436, + 379 + ], + "type": "text", + "content": "Yujun Zeng" + } + ] + } + ], + "index": 126 + }, + { + "bbox": [ + 388, + 380, + 449, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 388, + 380, + 449, + 388 + ], + "spans": [ + { + "bbox": [ + 388, + 380, + 449, + 388 + ], + "type": "text", + "content": "Yuncong Chen" + } + ] + } + ], + "index": 127 + }, + { + "bbox": [ + 388, + 389, + 439, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 388, + 389, + 439, + 399 + ], + "spans": [ + { + "bbox": [ + 388, + 389, + 439, + 399 + ], + "type": "text", + "content": "Yunke Zhou" + } + ] + } + ], 
+ "index": 128 + }, + { + "bbox": [ + 388, + 400, + 441, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 388, + 400, + 441, + 411 + ], + "spans": [ + { + "bbox": [ + 388, + 400, + 441, + 411 + ], + "type": "text", + "content": "Yusheng Xie" + } + ] + } + ], + "index": 129 + }, + { + "bbox": [ + 388, + 411, + 430, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 388, + 411, + 430, + 422 + ], + "spans": [ + { + "bbox": [ + 388, + 411, + 430, + 422 + ], + "type": "text", + "content": "Yvon Guy" + } + ] + } + ], + "index": 130 + }, + { + "bbox": [ + 388, + 422, + 482, + 433 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 388, + 422, + 482, + 433 + ], + "spans": [ + { + "bbox": [ + 388, + 422, + 482, + 433 + ], + "type": "text", + "content": "Zbigniew Ambrozinski" + } + ] + } + ], + "index": 131 + }, + { + "bbox": [ + 388, + 434, + 441, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 388, + 434, + 441, + 443 + ], + "spans": [ + { + "bbox": [ + 388, + 434, + 441, + 443 + ], + "type": "text", + "content": "Zhaowei Cai" + } + ] + } + ], + "index": 132 + }, + { + "bbox": [ + 388, + 445, + 439, + 455 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 388, + 445, + 439, + 455 + ], + "spans": [ + { + "bbox": [ + 388, + 445, + 439, + 455 + ], + "type": "text", + "content": "Zhen Zhang" + } + ] + } + ], + "index": 133 + }, + { + "bbox": [ + 388, + 456, + 441, + 466 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 388, + 456, + 441, + 466 + ], + "spans": [ + { + "bbox": [ + 388, + 456, + 441, + 466 + ], + "type": "text", + "content": "Zheng Wang" + } + ] + } + ], + "index": 134 + }, + { + "bbox": [ + 388, + 467, + 443, + 476 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 388, + 467, + 443, + 476 + ], + "spans": [ + { + "bbox": [ + 388, + 467, + 443, + 476 + ], + "type": "text", + "content": "Zhenghui Jin" + } + ] + } + ], + "index": 135 + }, + { + "bbox": [ + 388, + 477, + 443, + 487 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 388, + 477, + 443, + 487 + ], + "spans": [ + { + "bbox": [ + 388, + 477, + 443, + 487 + ], + "type": "text", + "content": "Zhewei Zhao" + } + ] + } + ], + "index": 136 + }, + { + "bbox": [ + 388, + 488, + 435, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 388, + 488, + 435, + 498 + ], + "spans": [ + { + "bbox": [ + 388, + 488, + 435, + 498 + ], + "type": "text", + "content": "Zhiheng Li" + } + ] + } + ], + "index": 137 + }, + { + "bbox": [ + 388, + 498, + 442, + 509 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 388, + 498, + 442, + 509 + ], + "spans": [ + { + "bbox": [ + 388, + 498, + 442, + 509 + ], + "type": "text", + "content": "Zhiheng Luo" + } + ] + } + ], + "index": 138 + }, + { + "bbox": [ + 388, + 510, + 451, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 388, + 510, + 451, + 521 + ], + "spans": [ + { + "bbox": [ + 388, + 510, + 451, + 521 + ], + "type": "text", + "content": "Zhikang Zhang" + } + ] + } + ], + "index": 139 + }, + { + "bbox": [ + 388, + 521, + 437, + 531 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 388, + 521, + 437, + 531 + ], + "spans": [ + { + "bbox": [ + 388, + 521, + 437, + 531 + ], + "type": "text", + "content": "Zhilin Fang" + } + ] + } + ], + "index": 140 + }, + { + "bbox": [ + 388, + 532, + 426, + 541 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 388, + 
532, + 426, + 541 + ], + "spans": [ + { + "bbox": [ + 388, + 532, + 426, + 541 + ], + "type": "text", + "content": "Zhiqi Bu" + } + ] + } + ], + "index": 141 + }, + { + "bbox": [ + 388, + 543, + 449, + 553 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 388, + 543, + 449, + 553 + ], + "spans": [ + { + "bbox": [ + 388, + 543, + 449, + 553 + ], + "type": "text", + "content": "Zhiyuan Wang" + } + ] + } + ], + "index": 142 + }, + { + "bbox": [ + 388, + 554, + 440, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 388, + 554, + 440, + 563 + ], + "spans": [ + { + "bbox": [ + 388, + 554, + 440, + 563 + ], + "type": "text", + "content": "Zhizhong Li" + } + ] + } + ], + "index": 143 + }, + { + "bbox": [ + 388, + 564, + 440, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 388, + 564, + 440, + 574 + ], + "spans": [ + { + "bbox": [ + 388, + 564, + 440, + 574 + ], + "type": "text", + "content": "Zijian Wang" + } + ] + } + ], + "index": 144 + }, + { + "bbox": [ + 388, + 575, + 470, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 388, + 575, + 470, + 586 + ], + "spans": [ + { + "bbox": [ + 388, + 575, + 470, + 586 + ], + "type": "text", + "content": "Zimeng (Chris) Qiu" + } + ] + } + ], + "index": 145 + }, + { + "bbox": [ + 388, + 586, + 422, + 595 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 388, + 586, + 422, + 595 + ], + "spans": [ + { + "bbox": [ + 388, + 586, + 422, + 595 + ], + "type": "text", + "content": "Zishi Li" + } + ] + } + ], + "index": 146 + }, + { + "bbox": [ + 69, + 624, + 179, + 636 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 624, + 179, + 636 + ], + "spans": [ + { + "bbox": [ + 69, + 624, + 179, + 636 + ], + "type": "text", + "content": "D.2 Acknowledgements" + } + ] + } + ], + "index": 147 + }, + { + "bbox": [ + 68, + 643, + 541, + 667 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 643, + 541, + 667 + ], + "spans": [ + { + "bbox": [ + 68, + 643, + 541, + 667 + ], + "type": "text", + "content": "We would like to acknowledge the following individuals who supported the development of the Nova models and services during the Nova program." 
+ } + ] + } + ], + "index": 148 + }, + { + "bbox": [ + 69, + 678, + 161, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 678, + 161, + 689 + ], + "spans": [ + { + "bbox": [ + 69, + 678, + 161, + 689 + ], + "type": "text", + "content": "Abdelrahman Badawy" + } + ] + } + ], + "index": 149 + }, + { + "bbox": [ + 70, + 690, + 137, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 690, + 137, + 700 + ], + "spans": [ + { + "bbox": [ + 70, + 690, + 137, + 700 + ], + "type": "text", + "content": "Abtin Rasoulian" + } + ] + } + ], + "index": 150 + }, + { + "bbox": [ + 70, + 700, + 146, + 711 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 700, + 146, + 711 + ], + "spans": [ + { + "bbox": [ + 70, + 700, + 146, + 711 + ], + "type": "text", + "content": "Adam Baranowski" + } + ] + } + ], + "index": 151 + }, + { + "bbox": [ + 229, + 678, + 296, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 229, + 678, + 296, + 689 + ], + "spans": [ + { + "bbox": [ + 229, + 678, + 296, + 689 + ], + "type": "text", + "content": "Aishwarya Kore" + } + ] + } + ], + "index": 152 + }, + { + "bbox": [ + 229, + 690, + 329, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 229, + 690, + 329, + 700 + ], + "spans": [ + { + "bbox": [ + 229, + 690, + 329, + 700 + ], + "type": "text", + "content": "Aishwarya Padmakumar" + } + ] + } + ], + "index": 153 + }, + { + "bbox": [ + 229, + 701, + 276, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 229, + 701, + 276, + 710 + ], + "spans": [ + { + "bbox": [ + 229, + 701, + 276, + 710 + ], + "type": "text", + "content": "Alain Krok" + } + ] + } + ], + "index": 154 + }, + { + "bbox": [ + 388, + 678, + 439, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 388, + 678, + 439, + 689 + ], + "spans": [ + { + "bbox": [ + 388, + 678, + 439, + 689 + ], + "type": "text", + "content": "Alex Mould" + } + ] + } + ], + "index": 155 + }, + { + "bbox": [ + 388, + 690, + 427, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 388, + 690, + 427, + 700 + ], + "spans": [ + { + "bbox": [ + 388, + 690, + 427, + 700 + ], + "type": "text", + "content": "Alex Sun" + } + ] + } + ], + "index": 156 + }, + { + "bbox": [ + 388, + 700, + 481, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 388, + 700, + 481, + 712 + ], + "spans": [ + { + "bbox": [ + 388, + 700, + 481, + 712 + ], + "type": "text", + "content": "Alexandros Papangelis" + } + ] + } + ], + "index": 157 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "spans": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "text", + "content": "The Amazon Nova Family of Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 297, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 297, + 741, + 309, + 750 + ], + "type": "text", + "content": "47" + } + ] + } + ], + "index": 158 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 46 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 69, + 72, + 197, + 597 + ], + "blocks": [ + { + "bbox": [ + 69, + 72, + 197, + 597 + ], + "lines": [ + { + "bbox": [ + 69, + 72, + 197, + 597 + ], + "spans": [ + { + "bbox": [ + 69, + 72, + 197, + 597 + ], + 
"type": "table", + "html": "
<table><tr><td>Alfred Shen</td></tr><tr><td>Amaran Asokkumar</td></tr><tr><td>Amiya Chakraborty</td></tr><tr><td>Anastasios Alexandridis</td></tr><tr><td>Angeliki Metallinou</td></tr><tr><td>Anila Joshi</td></tr><tr><td>Anup Katariya</td></tr><tr><td>Arda Keskiner</td></tr><tr><td>Avinash Venkatagiri</td></tr><tr><td>Aya Elzoheiry</td></tr><tr><td>Baishali Chaudhury</td></tr><tr><td>Ben Friebe</td></tr><tr><td>Bigad Soleiman</td></tr><tr><td>Bob Li</td></tr><tr><td>Brad Porter</td></tr><tr><td>Brian Chou</td></tr><tr><td>Brian Yost</td></tr><tr><td>Burak Gozluklu</td></tr><tr><td>Chad Connally</td></tr><tr><td>Chris Azer</td></tr><tr><td>Chris Beauchene</td></tr><tr><td>Chris Greenwood</td></tr><tr><td>Chris Johnson</td></tr><tr><td>Clay Cheng</td></tr><tr><td>Craig Rowland</td></tr><tr><td>Di Jin</td></tr><tr><td>Di Wu</td></tr><tr><td>Diego Socolinsky</td></tr><tr><td>Don Kretsch</td></tr><tr><td>Dylan Martin</td></tr><tr><td>Emma Lister</td></tr><tr><td>Eva Lasarcyk</td></tr><tr><td>Evan Kravitz</td></tr><tr><td>Federico D'Alessio</td></tr><tr><td>Flora Wang</td></tr><tr><td>Francisco Calderon Rodriguez</td></tr><tr><td>Gamaleldin Elsayed</td></tr><tr><td>Gaurav Rele</td></tr><tr><td>Gaurav Sukhatme</td></tr><tr><td>Gourav Datta</td></tr><tr><td>Hadrien Glaude</td></tr><tr><td>Hanbo Wang</td></tr><tr><td>Hans Hoeijmaker</td></tr><tr><td>Haotian An</td></tr><tr><td>Harpreet Cheema</td></tr><tr><td>Harshit Pande</td></tr><tr><td>Hongbin Zheng</td></tr><tr><td>Huda Khayrallah</td></tr></table>
", + "image_path": "104d1d3b30677a61aa977e3bfb470ed7db16f34aecc77a8a5a3e3f519f29ea89.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 229, + 72, + 314, + 596 + ], + "blocks": [ + { + "bbox": [ + 229, + 72, + 314, + 596 + ], + "lines": [ + { + "bbox": [ + 229, + 72, + 314, + 596 + ], + "spans": [ + { + "bbox": [ + 229, + 72, + 314, + 596 + ], + "type": "table", + "html": "
<table><tr><td>Isaac Privitera</td></tr><tr><td>Jacob Zhiyuan Fang</td></tr><tr><td>Jady Liu</td></tr><tr><td>Jae Oh Woo</td></tr><tr><td>Jamal Saboune</td></tr><tr><td>James Park</td></tr><tr><td>Jianbo Yuan</td></tr><tr><td>Jianwei Feng</td></tr><tr><td>Jie Li</td></tr><tr><td>Jinwoo Park</td></tr><tr><td>Johan Esbjourner</td></tr><tr><td>Jonathan makunga</td></tr><tr><td>JoonHyung Kim</td></tr><tr><td>Jorge Beltran</td></tr><tr><td>Jose Garrido Ramas</td></tr><tr><td>Julie Baca</td></tr><tr><td>Justin Lewis</td></tr><tr><td>Kamran Razi</td></tr><tr><td>Kangyan Liu</td></tr><tr><td>Kasana Mahesh</td></tr><tr><td>Kelvin Qian</td></tr><tr><td>Kyle Goehner</td></tr><tr><td>Kyle Saggar</td></tr><tr><td>Laith Al-Saadoon</td></tr><tr><td>Lei Sun</td></tr><tr><td>Lily Liao</td></tr><tr><td>Long Chen</td></tr><tr><td>Lukacs Ablonczy</td></tr><tr><td>Luke Luneau</td></tr><tr><td>Maciej Eichler</td></tr><tr><td>Mallory McManamo</td></tr><tr><td>Manju Arakere</td></tr><tr><td>Matt McCoy</td></tr><tr><td>Matthew Chang</td></tr><tr><td>Meghal Varia</td></tr><tr><td>Meghana Ashok</td></tr><tr><td>Melanie Li</td></tr><tr><td>Mifu Suzuki</td></tr><tr><td>Negin Sokhandan</td></tr><tr><td>Nick Biso</td></tr><tr><td>Nico Bishop</td></tr><tr><td>Nicolle Borges</td></tr><tr><td>Palash Goyal</td></tr><tr><td>Parker Coleman</td></tr><tr><td>Paul Sumarokov</td></tr><tr><td>Pavel Kveton</td></tr><tr><td>Philipp Lerche</td></tr><tr><td>Pratibha Kumari</td></tr></table>
", + "image_path": "f7dc2c775b272d735f1b4017fbebd6ba3845352c29ef471fa5da43d37bbacaae.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 388, + 72, + 503, + 575 + ], + "blocks": [ + { + "bbox": [ + 388, + 72, + 503, + 575 + ], + "lines": [ + { + "bbox": [ + 388, + 72, + 503, + 575 + ], + "spans": [ + { + "bbox": [ + 388, + 72, + 503, + 575 + ], + "type": "table", + "html": "
<table><tr><td>Rahul Agarwal</td></tr><tr><td>Rahul Ghosh</td></tr><tr><td>Rahul Kulkarni</td></tr><tr><td>Raj Kumar</td></tr><tr><td>Ramana Keerthi</td></tr><tr><td>Rams Sundaram</td></tr><tr><td>Raymond Fang</td></tr><tr><td>Reethika Kesani</td></tr><tr><td>Ryan Razkenari</td></tr><tr><td>Sarath Krishnan</td></tr><tr><td>Scott Patten</td></tr><tr><td>Seokhwan Kim</td></tr><tr><td>Sepehr Eghbali</td></tr><tr><td>Sergey Pugachev</td></tr><tr><td>Sertan Alkan</td></tr><tr><td>Shailav Taneja</td></tr><tr><td>Sheamus Punch</td></tr><tr><td>Shikib Mehri</td></tr><tr><td>Shilpa Singh</td></tr><tr><td>Shraddha Ravishankar</td></tr><tr><td>Sijia Liu</td></tr><tr><td>Sitanshu Gupta</td></tr><tr><td>Sol Vesdapunt</td></tr><tr><td>Spencer Romo</td></tr><tr><td>Sravya Uppu</td></tr><tr><td>Srivani Kambhampati</td></tr><tr><td>Stephanie Xie</td></tr><tr><td>Sujitha Martin</td></tr><tr><td>Sungjin Lee</td></tr><tr><td>Sungmin Hong</td></tr><tr><td>Tanner McRae</td></tr><tr><td>Thomas Patterson</td></tr><tr><td>Tina Li</td></tr><tr><td>Tom Liang</td></tr><tr><td>Trong Nguyen</td></tr><tr><td>Vasudev Mahesh Purandare</td></tr><tr><td>Vidya Sagar Ravipati</td></tr><tr><td>Vu San Ha Huynh</td></tr><tr><td>Weijuan Wu</td></tr><tr><td>Xiaolong Li</td></tr><tr><td>Xinyi Xu</td></tr><tr><td>Yaroslav Nechaev</td></tr><tr><td>Yuan Tian</td></tr><tr><td>Yunfei Bai</td></tr><tr><td>Zach Hille</td></tr><tr><td>Ziyan Tian</td></tr></table>
", + "image_path": "45df84ea256037b222c86788c17751b23921927fe22cd2beade0beab39a07079.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "spans": [ + { + "bbox": [ + 227, + 34, + 380, + 45 + ], + "type": "text", + "content": "The Amazon Nova Family of Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 297, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 297, + 741, + 309, + 750 + ], + "type": "text", + "content": "48" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 47 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file