diff --git "a/2025/ViewSRD_ 3D Visual Grounding via Structured Multi-View Decomposition/layout.json" "b/2025/ViewSRD_ 3D Visual Grounding via Structured Multi-View Decomposition/layout.json" new file mode 100644--- /dev/null +++ "b/2025/ViewSRD_ 3D Visual Grounding via Structured Multi-View Decomposition/layout.json" @@ -0,0 +1,9919 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 75, + 103, + 536, + 122 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 103, + 536, + 122 + ], + "spans": [ + { + "bbox": [ + 75, + 103, + 536, + 122 + ], + "type": "text", + "content": "ViewSRD: 3D Visual Grounding via Structured Multi-View Decomposition" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 175, + 142, + 435, + 172 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 175, + 142, + 435, + 172 + ], + "spans": [ + { + "bbox": [ + 175, + 142, + 435, + 172 + ], + "type": "text", + "content": "Ronggang Huang" + }, + { + "bbox": [ + 175, + 142, + 435, + 172 + ], + "type": "inline_equation", + "content": "^{1*}" + }, + { + "bbox": [ + 175, + 142, + 435, + 172 + ], + "type": "text", + "content": ", Haoxin Yang" + }, + { + "bbox": [ + 175, + 142, + 435, + 172 + ], + "type": "inline_equation", + "content": "^{1*†}" + }, + { + "bbox": [ + 175, + 142, + 435, + 172 + ], + "type": "text", + "content": ", Yan Cai" + }, + { + "bbox": [ + 175, + 142, + 435, + 172 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 175, + 142, + 435, + 172 + ], + "type": "text", + "content": ", Xuemiao Xu" + }, + { + "bbox": [ + 175, + 142, + 435, + 172 + ], + "type": "inline_equation", + "content": "^{12345†}" + }, + { + "bbox": [ + 175, + 142, + 435, + 172 + ], + "type": "text", + "content": ", Huaidong Zhang" + }, + { + "bbox": [ + 175, + 142, + 435, + 172 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 175, + 142, + 435, + 172 + ], + "type": "text", + "content": ", Shengfeng He" + }, + { + 
"bbox": [ + 175, + 142, + 435, + 172 + ], + "type": "inline_equation", + "content": "^{6}" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 74, + 172, + 536, + 186 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 172, + 536, + 186 + ], + "spans": [ + { + "bbox": [ + 74, + 172, + 536, + 186 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 74, + 172, + 536, + 186 + ], + "type": "text", + "content": " South China University of Technology " + }, + { + "bbox": [ + 74, + 172, + 536, + 186 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 74, + 172, + 536, + 186 + ], + "type": "text", + "content": " Guangdong Engineering Center for Large Model and GenAI Technology" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 170, + 186, + 441, + 199 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 170, + 186, + 441, + 199 + ], + "spans": [ + { + "bbox": [ + 170, + 186, + 441, + 199 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 170, + 186, + 441, + 199 + ], + "type": "text", + "content": " State Key Laboratory of Subtropical Building and Urban Science" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 155, + 201, + 454, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 155, + 201, + 454, + 213 + ], + "spans": [ + { + "bbox": [ + 155, + 201, + 454, + 213 + ], + "type": "text", + "content": "4 Ministry of Education Key Laboratory of Big Data and Intelligent Robot" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 118, + 214, + 492, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 214, + 492, + 228 + ], + "spans": [ + { + "bbox": [ + 118, + 214, + 492, + 228 + ], + "type": "text", + "content": "5 Guangdong Provincial Key Lab of Computational Intelligence and Cyberspace Information" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 230, + 228, + 377, + 242 + ], + "type": "text", + "angle": 
0, + "lines": [ + { + "bbox": [ + 230, + 228, + 377, + 242 + ], + "spans": [ + { + "bbox": [ + 230, + 228, + 377, + 242 + ], + "type": "inline_equation", + "content": "^{6}" + }, + { + "bbox": [ + 230, + 228, + 377, + 242 + ], + "type": "text", + "content": " Singapore Management University" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 152, + 269, + 200, + 282 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 152, + 269, + 200, + 282 + ], + "spans": [ + { + "bbox": [ + 152, + 269, + 200, + 282 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 55, + 294, + 296, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 294, + 296, + 582 + ], + "spans": [ + { + "bbox": [ + 55, + 294, + 296, + 582 + ], + "type": "text", + "content": "3D visual grounding aims to identify and localize objects in a 3D space based on textual descriptions. However, existing methods struggle with disentangling targets from anchors in complex multi-anchor queries and resolving inconsistencies in spatial descriptions caused by perspective variations. To tackle these challenges, we propose ViewSRD, a framework that formulates 3D visual grounding as a structured multi-view decomposition process. First, the Simple Relation Decoupling (SRD) module restructures complex multi-anchor queries into a set of targeted single-anchor statements, generating a structured set of perspective-aware descriptions that clarify positional relationships. These decomposed representations serve as the foundation for the Multi-view Textual-Scene Interaction (Multi-TSI) module, which integrates textual and scene features across multiple viewpoints using shared, Cross-modal Consistent View Tokens (CCVTs) to preserve spatial correlations. Finally, a Textual-Scene Reasoning module synthesizes multi-view predictions into a unified and robust 3D visual grounding. 
Experiments on 3D visual grounding datasets show that ViewSRD significantly outperforms state-of-the-art methods, particularly in complex queries requiring precise spatial differentiation. Code is available at https://github.com/visualjason/ViewSRD." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 55, + 591, + 135, + 604 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 591, + 135, + 604 + ], + "spans": [ + { + "bbox": [ + 55, + 591, + 135, + 604 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 55, + 611, + 295, + 684 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 611, + 295, + 684 + ], + "spans": [ + { + "bbox": [ + 55, + 611, + 295, + 684 + ], + "type": "text", + "content": "3D Visual Grounding (3DVG) aims to establish semantic correspondences between natural language descriptions and target objects in a 3D space [19, 44]. This task has gained significant attention in applications such as visual language navigation [25, 57], intelligent agents [4, 49], and autonomous vehicles [9, 13]." + } + ] + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 316, + 267, + 555, + 385 + ], + "blocks": [ + { + "bbox": [ + 316, + 267, + 555, + 385 + ], + "lines": [ + { + "bbox": [ + 316, + 267, + 555, + 385 + ], + "spans": [ + { + "bbox": [ + 316, + 267, + 555, + 385 + ], + "type": "image", + "image_path": "946d478baee8357da051cab472b739f69fde08a54f1f66f44e3dd17a68c57079.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 393, + 555, + 461 + ], + "lines": [ + { + "bbox": [ + 313, + 393, + 555, + 461 + ], + "spans": [ + { + "bbox": [ + 313, + 393, + 555, + 461 + ], + "type": "text", + "content": "Figure 1. (a) Previous 3DVG methods struggle with ambiguities from complex multi-anchor queries and perspective shifts. 
(b) ViewSRD addresses this by using the SRD module to simplify queries and the CCVTs to capture viewpoint variations in both scene and textual modal, boosting cross-modal feature interaction and enhancing grounding accuracy." + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "bbox": [ + 312, + 472, + 555, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 472, + 555, + 639 + ], + "spans": [ + { + "bbox": [ + 312, + 472, + 555, + 639 + ], + "type": "text", + "content": "Traditional single-view approaches rely on 2D sampled images to extract scene information [38] or construct scene graphs from textual descriptions [42]. However, these methods are inherently limited by their dependence on single-view cues, as language descriptions often presuppose specific viewpoints. To overcome this limitation, recent research has explored multi-view 3DVG, integrating multiple perspectives to enhance robustness [8, 31, 58]. Some methods process distinct descriptions for different viewpoints via manual annotation and learning [15, 37], while others incorporate spatial modules to encode relative spatial coordinates under specific perspectives [5]. However, they typically address only isolated aspects of the problem, limiting their effectiveness in handling complex multi-view scenarios." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 641, + 556, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 641, + 556, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 641, + 556, + 715 + ], + "type": "text", + "content": "Despite their potential, existing 3DVG models struggle to disentangle targets from anchors in multi-anchor textual descriptions [5, 14, 32]. Large language models (LLMs) often have difficulty interpreting such descriptions [17, 51], yet resolving these ambiguities is crucial for improving grounding accuracy [20]. 
Compounding this challenge, inconsist-" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 146, + 0, + 494, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 146, + 0, + 494, + 37 + ], + "spans": [ + { + "bbox": [ + 146, + 0, + 494, + 37 + ], + "type": "text", + "content": "This ICCV paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 66, + 693, + 204, + 703 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 693, + 204, + 703 + ], + "spans": [ + { + "bbox": [ + 66, + 693, + 204, + 703 + ], + "type": "text", + "content": "*The first two authors contributed equally." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 67, + 703, + 286, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 703, + 286, + 712 + ], + "spans": [ + { + "bbox": [ + 67, + 703, + 286, + 712 + ], + "type": "text", + "content": "†Corresponding authors: xuemx@scut.edu.cn, harxis@outlook.com." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "text", + "content": "9726" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 294, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 294, + 239 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 294, + 239 + ], + "type": "text", + "content": "cies between textual descriptions and spatial relationships arise when viewpoints change. As illustrated in Fig. 1, an object described as being to the right of another—such as \"The nightstand is to the right of the bed\"—from a front-facing view may appear on the left when observed from the opposite direction. These perspective-induced inconsistencies make it significantly harder for models to establish accurate correspondences between textual descriptions and visual information, further degrading performance. Ultimately, both the inherent complexity of multi-anchor queries and the challenges introduced by perspective shifts hinder the accurate interpretation of positional relationships in 3DVG, limiting the overall effectiveness of existing systems." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 243, + 295, + 554 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 243, + 295, + 554 + ], + "spans": [ + { + "bbox": [ + 56, + 243, + 295, + 554 + ], + "type": "text", + "content": "To tackle these challenges, we propose ViewSRD, a framework that formulates 3D visual grounding as a structured multi-view decomposition process. 
By leveraging the Simple Relation Decoupling (SRD) module, ViewSRD effectively disentangles target-anchor relationships in the complex multi-anchor queries, while the Multi-view Textual-Scene Interaction (Multi-TSI) module integrates multi-view information to enhance grounding accuracy. As illustrated in Fig. 1(b), ViewSRD first applies the SRD module to decompose complex multi-anchor queries into a set of simpler single-anchor queries, isolating interactions between the target and its anchors. This structured decomposition allows the model to more effectively learn positional relationships from textual descriptions. The Multi-TSI module then fuses textual and scene features across multiple viewpoints using Cross-modal Consistent View Tokens (CCVTs), which explicitly encode viewpoint information as learnable cue for both textual and scene module. This mechanism ensures that the model accurately captures spatial interactions, even under perspective shifts. Finally, the Textual-Scene Reasoning module aggregates these multi-view features to accurately predict the final 3D VG results. Extensive experiments have validated the efficacy of our proposed ViewSRD across different 3DVG benchmarks, demonstrating its superior performance across diverse scenarios. In summary, our contributions are fourfold:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 558, + 295, + 713 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 55, + 558, + 295, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 558, + 295, + 617 + ], + "spans": [ + { + "bbox": [ + 55, + 558, + 295, + 617 + ], + "type": "text", + "content": "- We propose ViewSRD, a framework that formulates 3D visual grounding as a structured multi-view decomposition process, effectively handling complex multi-anchor queries and mitigating text-visual inconsistencies across different perspectives." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 618, + 294, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 618, + 294, + 689 + ], + "spans": [ + { + "bbox": [ + 55, + 618, + 294, + 689 + ], + "type": "text", + "content": "- We introduce the Simple Relation Decoupling (SRD) module, which restructures complex multi-anchor queries into simpler single-anchor statements, disentangling target-anchor relationships. This structured decomposition enables the model to extract more effective textual features for grounding." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 689, + 294, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 689, + 294, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 689, + 294, + 713 + ], + "type": "text", + "content": "- We develop the Multi-view Textual-Scene Interaction (Multi-TSI) module to explicitly encode viewpoint infor" + } + ] + } + ], + "index": 4 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 322, + 72, + 553, + 120 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 322, + 72, + 553, + 120 + ], + "spans": [ + { + "bbox": [ + 322, + 72, + 553, + 120 + ], + "type": "text", + "content": "mation using cross-modal consistent view tokens. This mechanism ensures alignment between textual descriptions and visual features across different perspectives, reducing spatial ambiguities." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 314, + 121, + 553, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 121, + 553, + 156 + ], + "spans": [ + { + "bbox": [ + 314, + 121, + 553, + 156 + ], + "type": "text", + "content": "- We conduct extensive evaluations on 3D visual grounding datasets, where ViewSRD achieves state-of-the-art performance, yielding superior performance over prior work." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 314, + 167, + 400, + 179 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 167, + 400, + 179 + ], + "spans": [ + { + "bbox": [ + 314, + 167, + 400, + 179 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 187, + 553, + 341 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 187, + 553, + 341 + ], + "spans": [ + { + "bbox": [ + 313, + 187, + 553, + 341 + ], + "type": "text", + "content": "3D Visual Grounding. 3D computer vision has made great progress in various fields [7, 26, 29, 33, 45, 55, 56], the 3D visual grounding (3DVG) task involves identifying a target object in a 3D scene based on a natural language description [19, 44]. Pioneering datasets such as ScanRefer [6] and ReferIt3D [1], built on ScanNet [10], have driven progress in this field. Recent advancements like MVT [18] address view inconsistency by developing a view-robust multi-modal representation. Other works [23, 27, 47, 53] explore multimodal situated reasoning but lack a dedicated focus on handling the high semantic complexity of natural language in 3D grounding, particularly in disentangling intricate sentence structures." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 342, + 554, + 474 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 342, + 554, + 474 + ], + "spans": [ + { + "bbox": [ + 313, + 342, + 554, + 474 + ], + "type": "text", + "content": "Despite these advancements, the complexity of natural language descriptions remains a significant challenge in grounding tasks. Referring expressions often require reasoning over multiple anchor objects to precisely identify the target, making it crucial to disentangle and interpret intricate linguistic structures and spatial dependencies. 
Our method addresses this challenge by decoupling complex queries into simpler statements, improving the extraction of key relational information. Additionally, by leveraging view tokens, ViewSRD learns more accurate associations between textual descriptions and multi-view information." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 475, + 554, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 475, + 554, + 700 + ], + "spans": [ + { + "bbox": [ + 313, + 475, + 554, + 700 + ], + "type": "text", + "content": "Language Comprehension. Understanding referential language in 3DVG requires models to not only parse spatial descriptions but also interpret object relationships within a scene. Scene graphs, where objects serve as nodes and relationships form directed edges, have been widely used for tasks such as image retrieval and caption evaluation [19]. Traditional approaches employ scene graphs to enhance query comprehension [42], with efforts to convert sentences into structured representations [12, 34, 43] or generate grounded scene graphs for images [22, 24, 50]. However, these methods primarily focus on static, well-defined relationships and struggle with the dynamic, context-dependent nature of natural language. In datasets such as " + }, + { + "bbox": [ + 313, + 475, + 554, + 700 + ], + "type": "inline_equation", + "content": "\\mathrm{Nr3D}" + }, + { + "bbox": [ + 313, + 475, + 554, + 700 + ], + "type": "text", + "content": " [1], the complexity of interwoven spatial relationships and ambiguous references makes direct scene graph construction challenging. To address this, we propose leveraging Large Language Models (LLMs) [28, 40, 46] to enhance semantic understanding and spatial reasoning, reducing reliance on rigid structures while improving language comprehension." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 314, + 701, + 553, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 701, + 553, + 713 + ], + "spans": [ + { + "bbox": [ + 314, + 701, + 553, + 713 + ], + "type": "text", + "content": "3D Multi-View Learning. 3D vision research has largely" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "text", + "content": "9727" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 57, + 82, + 558, + 297 + ], + "blocks": [ + { + "bbox": [ + 57, + 82, + 558, + 297 + ], + "lines": [ + { + "bbox": [ + 57, + 82, + 558, + 297 + ], + "spans": [ + { + "bbox": [ + 57, + 82, + 558, + 297 + ], + "type": "image", + "image_path": "e37681de2108e9a0f8c5bbec750f4eef8488e8483ad3a69ccda17c0c0dc2f3dc.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 54, + 309, + 555, + 365 + ], + "lines": [ + { + "bbox": [ + 54, + 309, + 555, + 365 + ], + "spans": [ + { + "bbox": [ + 54, + 309, + 555, + 365 + ], + "type": "text", + "content": "Figure 2. Overview of ViewSRD. We begin by employing the Simple Relation Decoupling (SRD) module to decompose complex multi-anchor queries into multiple simpler single-anchor queries. Next, text and scene features are extracted separately using the text encoder and scene encoder. To explicitly incorporate scene information into the model, we fuse Cross-modal Consistent View Tokens (CCVTs) with these extracted features. 
The Multi-view Textual-Scene Interaction (Multi-TSI) module then facilitates a comprehensive interaction between textual and scene information, the 3DVG prediction results are finally generated by the Textual-Scene Reasoning Module." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 54, + 375, + 297, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 375, + 297, + 605 + ], + "spans": [ + { + "bbox": [ + 54, + 375, + 297, + 605 + ], + "type": "text", + "content": "focused on generating 2D projections from multiple viewpoints. While LLM-based grounding methods integrate multi-view images, they struggle with accurately identifying the primary viewpoint and demonstrating reliability, as discussed in [17, 51]. MVT [18] maps 3D scenes into multiple perspectives to enhance cross-view feature aggregation but lacks a mechanism to weigh each view's contribution, limiting performance in complex scenes. Similarly, ViewRefer [15] utilizes multi-view prototypes for cross-view interactions but lacks explicit training guidance on view importance. Mikasa [5] incorporates relative spatial coordinate information and a scene-aware module to improve object grounding but does not fully resolve view weighting challenges. In contrast, we propose Cross-modal Consistent View Tokens, which guide the model to dynamically adjust representation spaces and assess whether spatial relationships in decoupled sentences exhibit view dependency. This mechanism enables more reliable multi-view reasoning, improving performance in complex scenes." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 619, + 121, + 632 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 619, + 121, + 632 + ], + "spans": [ + { + "bbox": [ + 55, + 619, + 121, + 632 + ], + "type": "text", + "content": "3. 
ViewSRD" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 641, + 296, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 641, + 296, + 715 + ], + "spans": [ + { + "bbox": [ + 55, + 641, + 296, + 715 + ], + "type": "text", + "content": "In the context of 3D point cloud scenes, the term multiview refers to observing a shared scene representation (e.g., XYZ+RGB format) from different simulated viewpoints by rotating the scene around its central axis or camera viewpoints. Each view provides a partial observation of the same 3D environment, resulting in varying object appearances," + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 313, + 375, + 555, + 446 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 375, + 555, + 446 + ], + "spans": [ + { + "bbox": [ + 313, + 375, + 555, + 446 + ], + "type": "text", + "content": "occlusions, and spatial configurations across views. This multi-view setup introduces significant challenges for 3D visual grounding: (1) language-grounded spatial relations must remain consistent across view-dependent variations, and (2) object referents may be partially or completely invisible in certain views." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 313, + 448, + 556, + 663 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 448, + 556, + 663 + ], + "spans": [ + { + "bbox": [ + 313, + 448, + 556, + 663 + ], + "type": "text", + "content": "To tackle these challenges, we propose ViewSRD, a structured multi-view 3D visual grounding framework. The overall framework of our method is illustrated in Fig. 2. ViewSRD comprises two key components. The first component is the Simple Relation Decoupling (SRD) module, which decomposes multi-anchor queries into a series of single-anchor queries by leveraging the powerful language processing capabilities of LLMs and predefined prompt templates. 
This decomposition enables more precise inference of relative relationships between objects, improving the model's ability to capture spatial interactions. The second is the Multi-view Textual-Scene Interaction (Multi-TSI) module, which mitigates viewpoint dependency by integrating a shared, cross-modal consistent view token into both the language and visual models. These tokens facilitate feature interaction across perspectives, allowing the visual and textual models to align cross-modal viewpoint information more effectively." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 671, + 507, + 685 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 671, + 507, + 685 + ], + "spans": [ + { + "bbox": [ + 313, + 671, + 507, + 685 + ], + "type": "text", + "content": "3.1. Simple Relation Decoupling Module" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 689, + 555, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 689, + 555, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 689, + 555, + 715 + ], + "type": "text", + "content": "The Simple Relation Decoupling (SRD) module is designed to structurally decompose a multi-anchor query into mul" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "9728" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 61, + 71, + 292, + 331 + ], + "blocks": [ + { + "bbox": [ + 61, + 71, + 292, + 331 + ], + "lines": [ + { + "bbox": [ + 61, + 71, + 292, + 331 + ], + "spans": [ + { + "bbox": [ + 61, + 71, + 292, + 331 + ], + "type": "image", + "image_path": 
"77876b31e223121186458180f7215c934c21964215ab7e2d277a674b46fc75f7.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 85, + 343, + 265, + 356 + ], + "lines": [ + { + "bbox": [ + 85, + 343, + 265, + 356 + ], + "spans": [ + { + "bbox": [ + 85, + 343, + 265, + 356 + ], + "type": "text", + "content": "Figure 3. Overview of the SRD Module pipeline." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 364, + 295, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 364, + 295, + 555 + ], + "spans": [ + { + "bbox": [ + 55, + 364, + 295, + 555 + ], + "type": "text", + "content": "tiple simpler single-anchor queries, enhancing the text encoder's ability to comprehend and process relational information. As illustrated in Fig. 3, the SRD module first predicts the target and anchor labels within a sentence, assigning them as the subject and object in the simplified sentence, respectively. This restructuring forms the foundation for generating a structured prompt, which is then fed into an LLM to produce a set of simplified queries. To maintain semantic integrity, we employ a Sentence Matching algorithm which is described in detail in the supplementary material. that filters and retains the most relevant simplified queries, ensuring that the refined queries faithfully preserve the original meaning while improving clarity and interpretability. By disentangling object relationships between the target and multiple anchors, the SRD module enables more precise relational reasoning, enhancing 3DVG performance." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 556, + 295, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 556, + 295, + 665 + ], + "spans": [ + { + "bbox": [ + 55, + 556, + 295, + 665 + ], + "type": "text", + "content": "Target and anchors digging. 
We pre-train a classifier " + }, + { + "bbox": [ + 55, + 556, + 295, + 665 + ], + "type": "inline_equation", + "content": "Clas" + }, + { + "bbox": [ + 55, + 556, + 295, + 665 + ], + "type": "text", + "content": " to identify the word in a sentence that corresponds to the {Target} object. Given an input sentence, " + }, + { + "bbox": [ + 55, + 556, + 295, + 665 + ], + "type": "inline_equation", + "content": "Clas" + }, + { + "bbox": [ + 55, + 556, + 295, + 665 + ], + "type": "text", + "content": " first determines which word belongs to the target. Subsequently, we assess whether other words in the sentence appear in the predefined anchor set, " + }, + { + "bbox": [ + 55, + 556, + 295, + 665 + ], + "type": "inline_equation", + "content": "A_{lab} = \\{A_{lab1}, A_{lab2}, \\ldots\\}" + }, + { + "bbox": [ + 55, + 556, + 295, + 665 + ], + "type": "text", + "content": ", provided by the dataset. If a word matches an entry in this set, it is classified as an {Anchor} object. For more details about the " + }, + { + "bbox": [ + 55, + 556, + 295, + 665 + ], + "type": "inline_equation", + "content": "Clas" + }, + { + "bbox": [ + 55, + 556, + 295, + 665 + ], + "type": "text", + "content": ", please refer to the supplementary materials." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 665, + 296, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 665, + 296, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 665, + 296, + 714 + ], + "type": "text", + "content": "Decoupled multi-anchor queries. In practical referring queries, multiple anchors frequently co-occur within the same sentence, and the spatial relationship of the target is inherently tied to the anchor labels. 
In such cases, spatial" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 313, + 72, + 553, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 553, + 228 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 553, + 228 + ], + "type": "text", + "content": "descriptions involving multiple objects and their attributes often become entangled. For instance, in the " + }, + { + "bbox": [ + 313, + 72, + 553, + 228 + ], + "type": "inline_equation", + "content": "\\{\\text{Query}\\}" + }, + { + "bbox": [ + 313, + 72, + 553, + 228 + ], + "type": "text", + "content": " illustrated in Fig. 3, the object \"couch\", which is near the target \"pillow\", may dominate the spatial description, thereby weakening the relationship between the target and other anchors. To address the coupling issue in such queries, we design a set of prompt templates based on prior target and anchor digging, the process is shown in Fig. 3. Leveraging the reasoning capabilities of LLMs, we decompose complex multi-anchor queries into simpler single-anchor queries. This decoupling process clarifies the positional relationships between the target and its anchors in 3DVG, enhancing the model's spatial understanding." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 313, + 230, + 555, + 362 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 230, + 555, + 362 + ], + "spans": [ + { + "bbox": [ + 313, + 230, + 555, + 362 + ], + "type": "text", + "content": "We define a total of " + }, + { + "bbox": [ + 313, + 230, + 555, + 362 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 313, + 230, + 555, + 362 + ], + "type": "text", + "content": " structured " + }, + { + "bbox": [ + 313, + 230, + 555, + 362 + ], + "type": "inline_equation", + "content": "\\{Example\\}" + }, + { + "bbox": [ + 313, + 230, + 555, + 362 + ], + "type": "text", + "content": " derived from our pre-designed templates, such as \"The target is on the anchor\", with additional examples provided in the supplementary materials. For each anchor, the model generates " + }, + { + "bbox": [ + 313, + 230, + 555, + 362 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 313, + 230, + 555, + 362 + ], + "type": "text", + "content": " candidate queries, where " + }, + { + "bbox": [ + 313, + 230, + 555, + 362 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 313, + 230, + 555, + 362 + ], + "type": "text", + "content": " denotes the number of generated examples. To ensure the selected sentence best aligns with the original query, we apply a sentence-matching algorithm that evaluates both label consistency and semantic consistency. The final ranking is determined by a weighted average of these two scores. For further details, please refer to the supplementary materials." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 380, + 432, + 392 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 380, + 432, + 392 + ], + "spans": [ + { + "bbox": [ + 313, + 380, + 432, + 392 + ], + "type": "text", + "content": "3.2. 
Textual Aggregation" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 399, + 555, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 399, + 555, + 604 + ], + "spans": [ + { + "bbox": [ + 313, + 399, + 555, + 604 + ], + "type": "text", + "content": "Given a complex multi-anchor query sentence, the SRD module decomposes it into " + }, + { + "bbox": [ + 313, + 399, + 555, + 604 + ], + "type": "inline_equation", + "content": "(I + 1)" + }, + { + "bbox": [ + 313, + 399, + 555, + 604 + ], + "type": "text", + "content": " sentences, where " + }, + { + "bbox": [ + 313, + 399, + 555, + 604 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 313, + 399, + 555, + 604 + ], + "type": "text", + "content": " represents the number of anchors in the original sentence. Each anchor contributes to a shorter, simplified sentence, while the original complex query remains as a longer reference sentence. To extract meaningful linguistic representations, we employ BERT [11] as the text encoder to extract " + }, + { + "bbox": [ + 313, + 399, + 555, + 604 + ], + "type": "inline_equation", + "content": "(I + 1)" + }, + { + "bbox": [ + 313, + 399, + 555, + 604 + ], + "type": "text", + "content": " sentence features, generating a language feature set " + }, + { + "bbox": [ + 313, + 399, + 555, + 604 + ], + "type": "inline_equation", + "content": "\\{F_{T_0}, F_{T_1}, \\ldots, F_{T_I}\\}" + }, + { + "bbox": [ + 313, + 399, + 555, + 604 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 313, + 399, + 555, + 604 + ], + "type": "inline_equation", + "content": "F_{T_0}" + }, + { + "bbox": [ + 313, + 399, + 555, + 604 + ], + "type": "text", + "content": " corresponds to the original complex query, and the remaining elements represent the features of the decoupled simpler queries. To enable the model to effectively learn from diverse sentence representations, we introduce a textual feature aggregation strategy. 
We randomly sample one feature from the language feature set as the main feature " + }, + { + "bbox": [ + 313, + 399, + 555, + 604 + ], + "type": "inline_equation", + "content": "F_{\\mathrm{main}}" + }, + { + "bbox": [ + 313, + 399, + 555, + 604 + ], + "type": "text", + "content": ", while treating the remaining features as auxiliary features " + }, + { + "bbox": [ + 313, + 399, + 555, + 604 + ], + "type": "inline_equation", + "content": "F_{\\mathrm{aux}}" + }, + { + "bbox": [ + 313, + 399, + 555, + 604 + ], + "type": "text", + "content": ". The final aggregated feature is computed as:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 358, + 618, + 555, + 651 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 358, + 618, + 555, + 651 + ], + "spans": [ + { + "bbox": [ + 358, + 618, + 555, + 651 + ], + "type": "interline_equation", + "content": "F _ {\\text {a g g}} = \\alpha F _ {\\text {m a i n}} + (1 - \\alpha) \\cdot \\frac {1}{I} \\sum_ {i = 1} ^ {I} F _ {\\text {a u x} _ {i}}, \\tag {1}", + "image_path": "290ae8e5a26d5ea9ac31063185142fac49a9ee40f4f5d83b29649f4d61dea056.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 665, + 554, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 665, + 554, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 665, + 554, + 713 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 313, + 665, + 554, + 713 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 313, + 665, + 554, + 713 + ], + "type": "text", + "content": " is uniformly sampled from " + }, + { + "bbox": [ + 313, + 665, + 554, + 713 + ], + "type": "inline_equation", + "content": "\\{0, 0.1, 0.3, 0.5\\}" + }, + { + "bbox": [ + 313, + 665, + 554, + 713 + ], + "type": "text", + "content": " during training and fixed at 0.5 during validation. 
This adaptive fusion strategy ensures smooth feature integration, enhancing the model's robustness in language-conditioned 3DVG." + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "text", + "content": "9729" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 291, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 291, + 83 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 291, + 83 + ], + "type": "text", + "content": "3.3. Multi-view Textual-Scene Interaction Module" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 89, + 296, + 233 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 89, + 296, + 233 + ], + "spans": [ + { + "bbox": [ + 55, + 89, + 296, + 233 + ], + "type": "text", + "content": "Cross-modal Consistent View Tokens. Previous methods have largely overlooked the inconsistency in textual descriptions arising from perspective shifts in multi-view VG, making it challenging for models to accurately interpret these variations [3, 41]. To address this limitation, we introduce a series of learnable and shared Cross-modal Consistent View Tokens(CCVTs), which are integrated into both the textual and scene modules. By incorporating these tokens, both models are explicitly guided with relevant perspective information, enabling them to more effectively capture and understand the transformations and interactions induced by viewpoint changes." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 233, + 296, + 328 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 233, + 296, + 328 + ], + "spans": [ + { + "bbox": [ + 55, + 233, + 296, + 328 + ], + "type": "text", + "content": "Formally, let " + }, + { + "bbox": [ + 55, + 233, + 296, + 328 + ], + "type": "inline_equation", + "content": "\\mathcal{V} = \\{V_n | n = 1, 2, \\dots, N; V_n \\in \\mathbb{R}^D\\}" + }, + { + "bbox": [ + 55, + 233, + 296, + 328 + ], + "type": "text", + "content": " represents the set of CCVTs, where " + }, + { + "bbox": [ + 55, + 233, + 296, + 328 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 55, + 233, + 296, + 328 + ], + "type": "text", + "content": " denotes the number of viewpoints and " + }, + { + "bbox": [ + 55, + 233, + 296, + 328 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 55, + 233, + 296, + 328 + ], + "type": "text", + "content": " represents the dimensionality of CCVTs. The CCVTs are jointly optimized with our proposed textual and scene modules. Once trained, their values remain fixed during inference, serving as a stable reference that enhances the model's ability to comprehend multi-view scenarios and resolve perspective-induced inconsistencies." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 329, + 296, + 400 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 329, + 296, + 400 + ], + "spans": [ + { + "bbox": [ + 55, + 329, + 296, + 400 + ], + "type": "text", + "content": "Multi-view Textual Module. 
To effectively integrate sentence features from text encoders with viewpoint features extracted from CCVTs, we introduce the Multi-view Textual Module, which employs a cross-attention mechanism [39] to seamlessly encode viewpoint features " + }, + { + "bbox": [ + 55, + 329, + 296, + 400 + ], + "type": "inline_equation", + "content": "\\mathcal{V}" + }, + { + "bbox": [ + 55, + 329, + 296, + 400 + ], + "type": "text", + "content": " into the textual feature space through multi-head attention operation." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 401, + 296, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 401, + 296, + 568 + ], + "spans": [ + { + "bbox": [ + 55, + 401, + 296, + 568 + ], + "type": "text", + "content": "Since each sentence inherently carries distinct viewpoint information, it is crucial to embed perspective-aware features into textual representations effectively. To achieve this, we first compute the normalized dot product between each view token and the 0th token of each sentence's language feature " + }, + { + "bbox": [ + 55, + 401, + 296, + 568 + ], + "type": "inline_equation", + "content": "\\{F_{T_0}, F_{T_1}, \\ldots, F_{T_I}\\}" + }, + { + "bbox": [ + 55, + 401, + 296, + 568 + ], + "type": "text", + "content": ", as the 0th token " + }, + { + "bbox": [ + 55, + 401, + 296, + 568 + ], + "type": "inline_equation", + "content": "F^0" + }, + { + "bbox": [ + 55, + 401, + 296, + 568 + ], + "type": "text", + "content": " typically aggregates the most salient semantic information. We take the average of these dot products across different sentences and compute a corresponding probability distribution using the softmax function. This probability is then used to reweight the view token, adaptively increasing its contribution when the description aligns with the viewpoint and reducing it when the description does not match. 
The refined viewpoint token is formulated as:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 95, + 575, + 295, + 609 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 575, + 295, + 609 + ], + "spans": [ + { + "bbox": [ + 95, + 575, + 295, + 609 + ], + "type": "interline_equation", + "content": "\\mathcal {V} = \\operatorname {S o f t m a x} \\left(\\frac {1}{I} \\sum_ {i = 0} ^ {I} \\frac {F _ {T _ {i}} ^ {0} \\mathcal {V} ^ {T}}{\\| F _ {T _ {i}} ^ {0} \\| \\cdot \\| \\mathcal {V} \\|}\\right) \\mathcal {V}. \\tag {2}", + "image_path": "7632a064d305634c6b4c28812423dae992b0b8d8019f09fdea65af9058456263.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 617, + 296, + 679 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 617, + 296, + 679 + ], + "spans": [ + { + "bbox": [ + 55, + 617, + 296, + 679 + ], + "type": "text", + "content": "Subsequently, the aggregated features " + }, + { + "bbox": [ + 55, + 617, + 296, + 679 + ], + "type": "inline_equation", + "content": "F_{agg}" + }, + { + "bbox": [ + 55, + 617, + 296, + 679 + ], + "type": "text", + "content": ", as introduced in Section 3.2, serve as the query, while the viewpoint features " + }, + { + "bbox": [ + 55, + 617, + 296, + 679 + ], + "type": "inline_equation", + "content": "\\mathcal{V}" + }, + { + "bbox": [ + 55, + 617, + 296, + 679 + ], + "type": "text", + "content": " act as the key and value in the attention computation. 
The textual feature enriched with viewpoint embeddings, denoted as " + }, + { + "bbox": [ + 55, + 617, + 296, + 679 + ], + "type": "inline_equation", + "content": "F_q'" + }, + { + "bbox": [ + 55, + 617, + 296, + 679 + ], + "type": "text", + "content": ", is formulated as:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 87, + 687, + 295, + 717 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 87, + 687, + 295, + 717 + ], + "spans": [ + { + "bbox": [ + 87, + 687, + 295, + 717 + ], + "type": "interline_equation", + "content": "F _ {q} ^ {\\prime} = \\operatorname {S o f t m a x} \\left(\\frac {\\left(W _ {q} F _ {a g g}\\right) \\left(W _ {k} \\mathcal {V}\\right) ^ {T}}{\\sqrt {D}}\\right) W _ {v} \\mathcal {V}, \\tag {3}", + "image_path": "ddf3cb86c4c101cb68de4448f7b8d558bddcd3c5a86a141686c21ecb76623704.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 72, + 553, + 132 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 553, + 132 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 553, + 132 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 313, + 72, + 553, + 132 + ], + "type": "inline_equation", + "content": "W_{q}" + }, + { + "bbox": [ + 313, + 72, + 553, + 132 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 313, + 72, + 553, + 132 + ], + "type": "inline_equation", + "content": "W_{k}" + }, + { + "bbox": [ + 313, + 72, + 553, + 132 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 313, + 72, + 553, + 132 + ], + "type": "inline_equation", + "content": "W_{v}" + }, + { + "bbox": [ + 313, + 72, + 553, + 132 + ], + "type": "text", + "content": " are learnable linear projection matrices. 
Following this, " + }, + { + "bbox": [ + 313, + 72, + 553, + 132 + ], + "type": "inline_equation", + "content": "F_{q}^{\\prime}" + }, + { + "bbox": [ + 313, + 72, + 553, + 132 + ], + "type": "text", + "content": " undergoes an additional self-attention operation to further refine the textual features, ensuring that the encoded representations effectively capture perspective-dependent information." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 133, + 555, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 133, + 555, + 251 + ], + "spans": [ + { + "bbox": [ + 313, + 133, + 555, + 251 + ], + "type": "text", + "content": "Multi-view Scene Module. To effectively capture object features across diverse scenes, we introduce a Multi-View Scene Module that extracts and refines scene representations from multiple viewpoints. To achieve this, we employ PointNet++ [35] as the scene encoder, computing scene features " + }, + { + "bbox": [ + 313, + 133, + 555, + 251 + ], + "type": "inline_equation", + "content": "F_{V_n}" + }, + { + "bbox": [ + 313, + 133, + 555, + 251 + ], + "type": "text", + "content": " for each viewpoint, where " + }, + { + "bbox": [ + 313, + 133, + 555, + 251 + ], + "type": "inline_equation", + "content": "n \\in N" + }, + { + "bbox": [ + 313, + 133, + 555, + 251 + ], + "type": "text", + "content": " denotes the scene index across " + }, + { + "bbox": [ + 313, + 133, + 555, + 251 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 313, + 133, + 555, + 251 + ], + "type": "text", + "content": " viewpoints. 
Each scene feature " + }, + { + "bbox": [ + 313, + 133, + 555, + 251 + ], + "type": "inline_equation", + "content": "F_{V_n}" + }, + { + "bbox": [ + 313, + 133, + 555, + 251 + ], + "type": "text", + "content": " consists of object-level representations, expressed as " + }, + { + "bbox": [ + 313, + 133, + 555, + 251 + ], + "type": "inline_equation", + "content": "\\{F_{o1}, F_{o2}, \\dots, F_{oi}\\}" + }, + { + "bbox": [ + 313, + 133, + 555, + 251 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 313, + 133, + 555, + 251 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 313, + 133, + 555, + 251 + ], + "type": "text", + "content": " corresponds to the number of objects present in the scene." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 251, + 553, + 288 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 251, + 553, + 288 + ], + "spans": [ + { + "bbox": [ + 313, + 251, + 553, + 288 + ], + "type": "text", + "content": "To explicitly inform the model of the current scene, we concatenate our CCVTs " + }, + { + "bbox": [ + 313, + 251, + 553, + 288 + ], + "type": "inline_equation", + "content": "V_{n}" + }, + { + "bbox": [ + 313, + 251, + 553, + 288 + ], + "type": "text", + "content": " with the extracted scene features, forming the input representation:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 347, + 297, + 553, + 311 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 347, + 297, + 553, + 311 + ], + "spans": [ + { + "bbox": [ + 347, + 297, + 553, + 311 + ], + "type": "interline_equation", + "content": "\\mathbf {X} _ {n} = \\left\\{F _ {V _ {n}}, V _ {n} \\right\\} = \\left\\{F _ {o 1}, F _ {o 2}, \\dots , F _ {o i}, V _ {n} \\right\\}. 
\\tag {4}", + "image_path": "66e017e79d4678d53f67aca49446449b82ff936cf3222abc2fdc401394b5ec75.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 319, + 554, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 319, + 554, + 392 + ], + "spans": [ + { + "bbox": [ + 313, + 319, + 554, + 392 + ], + "type": "text", + "content": "These combined feature representations are then processed through several Transformer layers [39], denoted as " + }, + { + "bbox": [ + 313, + 319, + 554, + 392 + ], + "type": "inline_equation", + "content": "\\mathrm{Trans}(\\cdot)" + }, + { + "bbox": [ + 313, + 319, + 554, + 392 + ], + "type": "text", + "content": ", which enhances the relational encoding between objects and viewpoints. This mechanism ensures that both global scene context and fine-grained object details are effectively captured:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 384, + 399, + 553, + 415 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 384, + 399, + 553, + 415 + ], + "spans": [ + { + "bbox": [ + 384, + 399, + 553, + 415 + ], + "type": "interline_equation", + "content": "\\mathbf {Z} _ {n} ^ {(l + 1)} = \\operatorname {T r a n s} ^ {(l)} \\left(\\mathbf {Z} _ {n} ^ {(l)}\\right), \\tag {5}", + "image_path": "1fc24a128e4528a40d9c121eb67267e665c667cde1c503a3bb3af6fc7e06c7e8.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 426, + 553, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 426, + 553, + 464 + ], + "spans": [ + { + "bbox": [ + 313, + 426, + 553, + 464 + ], + "type": "text", + "content": "where the initial input to the Transformer is " + }, + { + "bbox": [ + 313, + 426, + 553, + 464 + ], + "type": "inline_equation", + "content": "\\mathbf{Z}_n^{(0)} = \\mathbf{X}_n" + }, + { + "bbox": [ + 313, + 426, + 553, + 464 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 313, + 426, + 553, + 464 + ], + "type": 
"inline_equation", + "content": "\\mathbf{Z}_n^{(L)}" + }, + { + "bbox": [ + 313, + 426, + 553, + 464 + ], + "type": "text", + "content": " represents the refined features after " + }, + { + "bbox": [ + 313, + 426, + 553, + 464 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 313, + 426, + 553, + 464 + ], + "type": "text", + "content": " Transformer layers." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 464, + 554, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 464, + 554, + 525 + ], + "spans": [ + { + "bbox": [ + 313, + 464, + 554, + 525 + ], + "type": "text", + "content": "At the final Transformer layer, the output consists of both [object] tokens and [view] tokens. Since the transformed features " + }, + { + "bbox": [ + 313, + 464, + 554, + 525 + ], + "type": "inline_equation", + "content": "F_{V_n}'" + }, + { + "bbox": [ + 313, + 464, + 554, + 525 + ], + "type": "text", + "content": " encapsulate both object-specific and viewpoint information, we retain only the [object] tokens for the subsequent grounding task:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 378, + 533, + 553, + 548 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 378, + 533, + 553, + 548 + ], + "spans": [ + { + "bbox": [ + 378, + 533, + 553, + 548 + ], + "type": "interline_equation", + "content": "F _ {V _ {n}} ^ {\\prime} = \\left\\{F _ {o 1} ^ {\\prime}, F _ {o 2} ^ {\\prime}, \\dots , F _ {o i} ^ {\\prime} \\right\\}. 
\\tag {6}", + "image_path": "fd59fc533269c250afc65f7dd7c560786bcb8953ab3e108f7902274d61281a6b.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 556, + 554, + 593 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 556, + 554, + 593 + ], + "spans": [ + { + "bbox": [ + 313, + 556, + 554, + 593 + ], + "type": "text", + "content": "This design ensures that object representations are enriched with multi-view contextual information while maintaining their distinct semantic properties for accurate 3DVG." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 313, + 600, + 493, + 613 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 600, + 493, + 613 + ], + "spans": [ + { + "bbox": [ + 313, + 600, + 493, + 613 + ], + "type": "text", + "content": "3.4. Textual-Scene Reasoning Module" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 313, + 617, + 555, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 617, + 555, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 617, + 555, + 715 + ], + "type": "text", + "content": "With the above formulation, we obtain the view-interactive textual features " + }, + { + "bbox": [ + 313, + 617, + 555, + 715 + ], + "type": "inline_equation", + "content": "F_{q}^{\\prime}" + }, + { + "bbox": [ + 313, + 617, + 555, + 715 + ], + "type": "text", + "content": " and scene features " + }, + { + "bbox": [ + 313, + 617, + 555, + 715 + ], + "type": "inline_equation", + "content": "F_{V}^{\\prime}" + }, + { + "bbox": [ + 313, + 617, + 555, + 715 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 313, + 617, + 555, + 715 + ], + "type": "inline_equation", + "content": "F_{V}^{\\prime} = \\{F_{V_{n}}^{\\prime} \\mid n = 1,2,\\dots,N\\}" + }, + { + "bbox": [ + 313, + 617, + 555, + 715 + ], + "type": "text", + "content": ", each enriched with viewpoint information. 
These features are then processed through the proposed Textual-Scene Reasoning Module to generate the final prediction. This module primarily consists of a Transformer with a cross-attention mechanism [39], where " + }, + { + "bbox": [ + 313, + 617, + 555, + 715 + ], + "type": "inline_equation", + "content": "F_{V}^{\\prime}" + }, + { + "bbox": [ + 313, + 617, + 555, + 715 + ], + "type": "text", + "content": " serves as the query, while " + }, + { + "bbox": [ + 313, + 617, + 555, + 715 + ], + "type": "inline_equation", + "content": "F_{q}^{\\prime}" + }, + { + "bbox": [ + 313, + 617, + 555, + 715 + ], + "type": "text", + "content": " functions as the key and" + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "text", + "content": "9730" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 74, + 72, + 92, + 89 + ], + "blocks": [ + { + "bbox": [ + 74, + 72, + 92, + 89 + ], + "lines": [ + { + "bbox": [ + 74, + 72, + 92, + 89 + ], + "spans": [ + { + "bbox": [ + 74, + 72, + 92, + 89 + ], + "type": "image", + "image_path": "eab320a4aa093e1a0a09eb54348963095a829c995355ccfeda5b59a77e1aa50e.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 96, + 76, + 138, + 83 + ], + "lines": [ + { + "bbox": [ + 96, + 76, + 138, + 83 + ], + "spans": [ + { + "bbox": [ + 96, + 76, + 138, + 83 + ], + "type": "text", + "content": "Ground Truth" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 189, + 72, + 207, + 89 + ], + "blocks": [ + { + "bbox": [ + 189, + 72, + 207, + 89 + ], + "lines": [ + { + "bbox": [ + 189, + 72, + 207, + 89 + 
], + "spans": [ + { + "bbox": [ + 189, + 72, + 207, + 89 + ], + "type": "image", + "image_path": "5e1275eb13913bf36b808131b4383c19e25874041f10d0499bc4543c1ebed529.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 212, + 76, + 277, + 83 + ], + "lines": [ + { + "bbox": [ + 212, + 76, + 277, + 83 + ], + "spans": [ + { + "bbox": [ + 212, + 76, + 277, + 83 + ], + "type": "text", + "content": "incorrect predictions" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 175, + 96, + 191, + 103 + ], + "lines": [ + { + "bbox": [ + 175, + 96, + 191, + 103 + ], + "spans": [ + { + "bbox": [ + 175, + 96, + 191, + 103 + ], + "type": "text", + "content": "MVT" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 320, + 72, + 338, + 89 + ], + "blocks": [ + { + "bbox": [ + 320, + 72, + 338, + 89 + ], + "lines": [ + { + "bbox": [ + 320, + 72, + 338, + 89 + ], + "spans": [ + { + "bbox": [ + 320, + 72, + 338, + 89 + ], + "type": "image", + "image_path": "b3544930bfe2050df8eb84b270fb226a2830f396252a9b0455fe4e54ca2a5c81.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 342, + 77, + 402, + 85 + ], + "lines": [ + { + "bbox": [ + 342, + 77, + 402, + 85 + ], + "spans": [ + { + "bbox": [ + 342, + 77, + 402, + 85 + ], + "type": "text", + "content": "correct predictions" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 427, + 72, + 444, + 89 + ], + "blocks": [ + { + "bbox": [ + 427, + 72, + 444, + 89 + ], + "lines": [ + { + "bbox": [ + 427, + 72, + 444, + 89 + ], + "spans": [ + { + "bbox": [ + 427, + 72, + 444, + 89 + ], + "type": "image", + "image_path": "5fb571b271ea56608fd7c7c6f62d0d3bb2faf8118e6a911463d938c27d9d1c1a.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + 
{ + "bbox": [ + 447, + 76, + 503, + 83 + ], + "lines": [ + { + "bbox": [ + 447, + 76, + 503, + 83 + ], + "spans": [ + { + "bbox": [ + 447, + 76, + 503, + 83 + ], + "type": "text", + "content": "Mentioned Object" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 59, + 107, + 137, + 160 + ], + "blocks": [ + { + "bbox": [ + 77, + 96, + 119, + 103 + ], + "lines": [ + { + "bbox": [ + 77, + 96, + 119, + 103 + ], + "spans": [ + { + "bbox": [ + 77, + 96, + 119, + 103 + ], + "type": "text", + "content": "Ground Truth" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 59, + 107, + 137, + 160 + ], + "lines": [ + { + "bbox": [ + 59, + 107, + 137, + 160 + ], + "spans": [ + { + "bbox": [ + 59, + 107, + 137, + 160 + ], + "type": "image", + "image_path": "c149c4de3a8c338b77009e2de4bc646b7704f071a6738cc18d6d896b22d30dbd.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 141, + 108, + 219, + 160 + ], + "blocks": [ + { + "bbox": [ + 141, + 108, + 219, + 160 + ], + "lines": [ + { + "bbox": [ + 141, + 108, + 219, + 160 + ], + "spans": [ + { + "bbox": [ + 141, + 108, + 219, + 160 + ], + "type": "image", + "image_path": "b49289c54111f079b81ab5b8963f6f7120532f80730b0fbe8865d3312ed86137.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 58, + 163, + 320, + 171 + ], + "lines": [ + { + "bbox": [ + 58, + 163, + 320, + 171 + ], + "spans": [ + { + "bbox": [ + 58, + 163, + 320, + 171 + ], + "type": "text", + "content": "Complex multi-anchor query: Find the chair that is at the desk with a computer monitor on it." 
+ } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 223, + 108, + 302, + 160 + ], + "blocks": [ + { + "bbox": [ + 247, + 95, + 280, + 102 + ], + "lines": [ + { + "bbox": [ + 247, + 95, + 280, + 102 + ], + "spans": [ + { + "bbox": [ + 247, + 95, + 280, + 102 + ], + "type": "text", + "content": "CoT3DRef" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 223, + 108, + 302, + 160 + ], + "lines": [ + { + "bbox": [ + 223, + 108, + 302, + 160 + ], + "spans": [ + { + "bbox": [ + 223, + 108, + 302, + 160 + ], + "type": "image", + "image_path": "95cb992526d27d3664042219440eaa408c6038157d9f13a432871fad19da47e9.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 307, + 108, + 384, + 160 + ], + "blocks": [ + { + "bbox": [ + 326, + 95, + 356, + 102 + ], + "lines": [ + { + "bbox": [ + 326, + 95, + 356, + 102 + ], + "spans": [ + { + "bbox": [ + 326, + 95, + 356, + 102 + ], + "type": "text", + "content": "ViewSRD" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 307, + 108, + 384, + 160 + ], + "lines": [ + { + "bbox": [ + 307, + 108, + 384, + 160 + ], + "spans": [ + { + "bbox": [ + 307, + 108, + 384, + 160 + ], + "type": "image", + "image_path": "f5f5f0d2610bf5ecbc6f2630be133044fb29858f4af05dad6193523d8c897efb.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 389, + 108, + 470, + 160 + ], + "blocks": [ + { + "bbox": [ + 449, + 95, + 492, + 102 + ], + "lines": [ + { + "bbox": [ + 449, + 95, + 492, + 102 + ], + "spans": [ + { + "bbox": [ + 449, + 95, + 492, + 102 + ], + "type": "text", + "content": "Simple Queries" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 389, + 108, + 470, + 160 + ], 
+ "lines": [ + { + "bbox": [ + 389, + 108, + 470, + 160 + ], + "spans": [ + { + "bbox": [ + 389, + 108, + 470, + 160 + ], + "type": "image", + "image_path": "f7c22f04be7dbb0aeaff017a1870c824d5f5e00d4038cacaf53277ef875edc60.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 392, + 163, + 474, + 171 + ], + "lines": [ + { + "bbox": [ + 392, + 163, + 474, + 171 + ], + "spans": [ + { + "bbox": [ + 392, + 163, + 474, + 171 + ], + "type": "text", + "content": "The chair is with monitor on it." + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_caption" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 473, + 108, + 553, + 160 + ], + "blocks": [ + { + "bbox": [ + 473, + 108, + 553, + 160 + ], + "lines": [ + { + "bbox": [ + 473, + 108, + 553, + 160 + ], + "spans": [ + { + "bbox": [ + 473, + 108, + 553, + 160 + ], + "type": "image", + "image_path": "530ed46376e10e77852b4c228f60b9c9155e8c19ee09b48476902b3e0a29fb30.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 481, + 163, + 543, + 171 + ], + "lines": [ + { + "bbox": [ + 481, + 163, + 543, + 171 + ], + "spans": [ + { + "bbox": [ + 481, + 163, + 543, + 171 + ], + "type": "text", + "content": "The chair is at the desk." 
+ } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 59, + 175, + 138, + 223 + ], + "blocks": [ + { + "bbox": [ + 59, + 175, + 138, + 223 + ], + "lines": [ + { + "bbox": [ + 59, + 175, + 138, + 223 + ], + "spans": [ + { + "bbox": [ + 59, + 175, + 138, + 223 + ], + "type": "image", + "image_path": "11f835dbb2f1d03af174defc265118f3fbcfe6a78a0161aadc407577c4fc29ad.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 58, + 225, + 348, + 233 + ], + "lines": [ + { + "bbox": [ + 58, + 225, + 348, + 233 + ], + "spans": [ + { + "bbox": [ + 58, + 225, + 348, + 233 + ], + "type": "text", + "content": "Complex multi-anchor query: There are 2 trash cans under the sink. Select the smaller one on the right." + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_caption" + } + ], + "index": 22 + }, + { + "type": "image", + "bbox": [ + 142, + 175, + 220, + 223 + ], + "blocks": [ + { + "bbox": [ + 142, + 175, + 220, + 223 + ], + "lines": [ + { + "bbox": [ + 142, + 175, + 220, + 223 + ], + "spans": [ + { + "bbox": [ + 142, + 175, + 220, + 223 + ], + "type": "image", + "image_path": "065bb2a6f238b1db2299fd4590de1198da22b9f6679cac2bed2b93d40b1ca310.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_body" + } + ], + "index": 23 + }, + { + "type": "image", + "bbox": [ + 225, + 175, + 301, + 223 + ], + "blocks": [ + { + "bbox": [ + 225, + 175, + 301, + 223 + ], + "lines": [ + { + "bbox": [ + 225, + 175, + 301, + 223 + ], + "spans": [ + { + "bbox": [ + 225, + 175, + 301, + 223 + ], + "type": "image", + "image_path": "7a445e61e35c1244f29b91bab6f708e8005259ba4817be3f50b66089fccac252.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_body" + } + ], + "index": 24 + }, + { + "type": "image", + "bbox": [ + 306, + 175, + 383, + 223 + ], + "blocks": [ + { + "bbox": [ + 306, + 175, + 383, + 223 + ], + "lines": [ + { + "bbox": [ + 
306, + 175, + 383, + 223 + ], + "spans": [ + { + "bbox": [ + 306, + 175, + 383, + 223 + ], + "type": "image", + "image_path": "6c0d4b646259957f37bb2a501ddb6608a95684c85f41c2b89ad6438dccac49d9.jpg" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_body" + } + ], + "index": 25 + }, + { + "type": "image", + "bbox": [ + 389, + 175, + 470, + 223 + ], + "blocks": [ + { + "bbox": [ + 389, + 175, + 470, + 223 + ], + "lines": [ + { + "bbox": [ + 389, + 175, + 470, + 223 + ], + "spans": [ + { + "bbox": [ + 389, + 175, + 470, + 223 + ], + "type": "image", + "image_path": "6006126a568261dc266c962781defe6534b40da65d487b4d87df9c38592960e1.jpg" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 386, + 225, + 476, + 232 + ], + "lines": [ + { + "bbox": [ + 386, + 225, + 476, + 232 + ], + "spans": [ + { + "bbox": [ + 386, + 225, + 476, + 232 + ], + "type": "text", + "content": "The trash cans are under the sink." + } + ] + } + ], + "index": 29, + "angle": 0, + "type": "image_caption" + } + ], + "index": 26 + }, + { + "type": "image", + "bbox": [ + 474, + 175, + 553, + 223 + ], + "blocks": [ + { + "bbox": [ + 474, + 175, + 553, + 223 + ], + "lines": [ + { + "bbox": [ + 474, + 175, + 553, + 223 + ], + "spans": [ + { + "bbox": [ + 474, + 175, + 553, + 223 + ], + "type": "image", + "image_path": "12bbc812fbb669456efeb24d4dc777a16d2ea5488a8f7e902a61212f7d9b2599.jpg" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 480, + 225, + 544, + 239 + ], + "lines": [ + { + "bbox": [ + 480, + 225, + 544, + 239 + ], + "spans": [ + { + "bbox": [ + 480, + 225, + 544, + 239 + ], + "type": "text", + "content": "The target is the smaller trash can on the right." 
+ } + ] + } + ], + "index": 30, + "angle": 0, + "type": "image_caption" + } + ], + "index": 27 + }, + { + "type": "image", + "bbox": [ + 58, + 242, + 138, + 291 + ], + "blocks": [ + { + "bbox": [ + 58, + 242, + 138, + 291 + ], + "lines": [ + { + "bbox": [ + 58, + 242, + 138, + 291 + ], + "spans": [ + { + "bbox": [ + 58, + 242, + 138, + 291 + ], + "type": "image", + "image_path": "8170dc158b7d08c3685aa3ee25b7206c7b3655b7d6d666d875c00ef82ef6cf7a.jpg" + } + ] + } + ], + "index": 31, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 58, + 292, + 140, + 300 + ], + "lines": [ + { + "bbox": [ + 58, + 292, + 140, + 300 + ], + "spans": [ + { + "bbox": [ + 58, + 292, + 140, + 300 + ], + "type": "text", + "content": "Complex multi-anchor query:" + } + ] + } + ], + "index": 37, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 90, + 308, + 100, + 316 + ], + "lines": [ + { + "bbox": [ + 90, + 308, + 100, + 316 + ], + "spans": [ + { + "bbox": [ + 90, + 308, + 100, + 316 + ], + "type": "text", + "content": "(a)" + } + ] + } + ], + "index": 42, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 55, + 328, + 554, + 361 + ], + "lines": [ + { + "bbox": [ + 55, + 328, + 554, + 361 + ], + "spans": [ + { + "bbox": [ + 55, + 328, + 554, + 361 + ], + "type": "text", + "content": "Figure 4. Visualization Results of the 3D Visual Grounding Results. For the presented 3D scenes, we utilize green, red, blue, and yellow boxes to represent the ground truth, incorrect predictions, correct predictions, and the mentioned objects, respectively. Columns (e) and (f) present the decomposed simple queries derived from the complex queries." 
+ } + ] + } + ], + "index": 48, + "angle": 0, + "type": "image_caption" + } + ], + "index": 31 + }, + { + "type": "image", + "bbox": [ + 141, + 242, + 220, + 291 + ], + "blocks": [ + { + "bbox": [ + 141, + 242, + 220, + 291 + ], + "lines": [ + { + "bbox": [ + 141, + 242, + 220, + 291 + ], + "spans": [ + { + "bbox": [ + 141, + 242, + 220, + 291 + ], + "type": "image", + "image_path": "a44851b1ae326977c23cf76679dae8435ede9c0e6e9f8346a3c242e36dbece2c.jpg" + } + ] + } + ], + "index": 32, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 141, + 292, + 220, + 300 + ], + "lines": [ + { + "bbox": [ + 141, + 292, + 220, + 300 + ], + "spans": [ + { + "bbox": [ + 141, + 292, + 220, + 300 + ], + "type": "text", + "content": "The nightstand to the right of" + } + ] + } + ], + "index": 38, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 175, + 308, + 184, + 316 + ], + "lines": [ + { + "bbox": [ + 175, + 308, + 184, + 316 + ], + "spans": [ + { + "bbox": [ + 175, + 308, + 184, + 316 + ], + "type": "text", + "content": "(b)" + } + ] + } + ], + "index": 43, + "angle": 0, + "type": "image_caption" + } + ], + "index": 32 + }, + { + "type": "image", + "bbox": [ + 225, + 242, + 301, + 291 + ], + "blocks": [ + { + "bbox": [ + 225, + 242, + 301, + 291 + ], + "lines": [ + { + "bbox": [ + 225, + 242, + 301, + 291 + ], + "spans": [ + { + "bbox": [ + 225, + 242, + 301, + 291 + ], + "type": "image", + "image_path": "56b449e314adaa9d4b9d19d7cd620f1a263bdad65604d60fc6a2c101f2dc7405.jpg" + } + ] + } + ], + "index": 33, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 225, + 292, + 371, + 300 + ], + "lines": [ + { + "bbox": [ + 225, + 292, + 371, + 300 + ], + "spans": [ + { + "bbox": [ + 225, + 292, + 371, + 300 + ], + "type": "text", + "content": "of the bed, closest to the wall with the framed picture." 
+ } + ] + } + ], + "index": 39, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 261, + 308, + 271, + 316 + ], + "lines": [ + { + "bbox": [ + 261, + 308, + 271, + 316 + ], + "spans": [ + { + "bbox": [ + 261, + 308, + 271, + 316 + ], + "type": "text", + "content": "(c)" + } + ] + } + ], + "index": 44, + "angle": 0, + "type": "image_caption" + } + ], + "index": 33 + }, + { + "type": "image", + "bbox": [ + 306, + 242, + 383, + 291 + ], + "blocks": [ + { + "bbox": [ + 306, + 242, + 383, + 291 + ], + "lines": [ + { + "bbox": [ + 306, + 242, + 383, + 291 + ], + "spans": [ + { + "bbox": [ + 306, + 242, + 383, + 291 + ], + "type": "image", + "image_path": "32a87899ed66c4fa58909669e57355887b7f6484ac9284eae8fecd4b69e93333.jpg" + } + ] + } + ], + "index": 34, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 345, + 308, + 355, + 317 + ], + "lines": [ + { + "bbox": [ + 345, + 308, + 355, + 317 + ], + "spans": [ + { + "bbox": [ + 345, + 308, + 355, + 317 + ], + "type": "text", + "content": "(d)" + } + ] + } + ], + "index": 45, + "angle": 0, + "type": "image_caption" + } + ], + "index": 34 + }, + { + "type": "image", + "bbox": [ + 388, + 242, + 470, + 291 + ], + "blocks": [ + { + "bbox": [ + 388, + 242, + 470, + 291 + ], + "lines": [ + { + "bbox": [ + 388, + 242, + 470, + 291 + ], + "spans": [ + { + "bbox": [ + 388, + 242, + 470, + 291 + ], + "type": "image", + "image_path": "879be34d395ac7f61c54a22ee7f75b9ae45ad4f5554cb46d17b49cf2ef5e3a08.jpg" + } + ] + } + ], + "index": 35, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 391, + 292, + 455, + 306 + ], + "lines": [ + { + "bbox": [ + 391, + 292, + 455, + 306 + ], + "spans": [ + { + "bbox": [ + 391, + 292, + 455, + 306 + ], + "type": "text", + "content": "The nightstand is to the right of the bed." 
+ } + ] + } + ], + "index": 40, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 425, + 308, + 435, + 317 + ], + "lines": [ + { + "bbox": [ + 425, + 308, + 435, + 317 + ], + "spans": [ + { + "bbox": [ + 425, + 308, + 435, + 317 + ], + "type": "text", + "content": "(e)" + } + ] + } + ], + "index": 46, + "angle": 0, + "type": "image_caption" + } + ], + "index": 35 + }, + { + "type": "image", + "bbox": [ + 473, + 242, + 553, + 291 + ], + "blocks": [ + { + "bbox": [ + 473, + 242, + 553, + 291 + ], + "lines": [ + { + "bbox": [ + 473, + 242, + 553, + 291 + ], + "spans": [ + { + "bbox": [ + 473, + 242, + 553, + 291 + ], + "type": "image", + "image_path": "7e8945f5de11d68a090c92b9585b0a6d4e7d11e1fe63e5cef509be786cdc58f0.jpg" + } + ] + } + ], + "index": 36, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 473, + 292, + 553, + 306 + ], + "lines": [ + { + "bbox": [ + 473, + 292, + 553, + 306 + ], + "spans": [ + { + "bbox": [ + 473, + 292, + 553, + 306 + ], + "type": "text", + "content": "The nightstand is closest to the wall with the framed picture." + } + ] + } + ], + "index": 41, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 509, + 308, + 517, + 316 + ], + "lines": [ + { + "bbox": [ + 509, + 308, + 517, + 316 + ], + "spans": [ + { + "bbox": [ + 509, + 308, + 517, + 316 + ], + "type": "text", + "content": "(f)" + } + ] + } + ], + "index": 47, + "angle": 0, + "type": "image_caption" + } + ], + "index": 36 + }, + { + "bbox": [ + 55, + 368, + 296, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 368, + 296, + 464 + ], + "spans": [ + { + "bbox": [ + 55, + 368, + 296, + 464 + ], + "type": "text", + "content": "value, facilitating fine-grained alignment between textual and visual representations. Additionally, a View Aggregation mechanism integrates information across multiple viewpoints by computing both the average and maximum values of the output features. 
Finally, a Prediction Head projects the aggregated features into the result space, enabling a view-aware 3D Visual Grounding model capable of effectively reasoning across multiple perspectives." + } + ] + } + ], + "index": 49 + }, + { + "bbox": [ + 55, + 471, + 186, + 482 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 471, + 186, + 482 + ], + "spans": [ + { + "bbox": [ + 55, + 471, + 186, + 482 + ], + "type": "text", + "content": "3.5. Overall Loss Functions" + } + ] + } + ], + "index": 50 + }, + { + "bbox": [ + 55, + 488, + 296, + 620 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 488, + 296, + 620 + ], + "spans": [ + { + "bbox": [ + 55, + 488, + 296, + 620 + ], + "type": "text", + "content": "Following prior research [18, 36], three distinct loss functions are applied on ViewSRD. These include a referential loss " + }, + { + "bbox": [ + 55, + 488, + 296, + 620 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{Ref}" + }, + { + "bbox": [ + 55, + 488, + 296, + 620 + ], + "type": "text", + "content": " derived from grounding predictions, an object-level loss " + }, + { + "bbox": [ + 55, + 488, + 296, + 620 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{Object}" + }, + { + "bbox": [ + 55, + 488, + 296, + 620 + ], + "type": "text", + "content": " capturing object shape and center and a sentence-level loss " + }, + { + "bbox": [ + 55, + 488, + 296, + 620 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{Sent}" + }, + { + "bbox": [ + 55, + 488, + 296, + 620 + ], + "type": "text", + "content": " designed to identify the target and anchor phrases within the " + }, + { + "bbox": [ + 55, + 488, + 296, + 620 + ], + "type": "inline_equation", + "content": "F_{\\mathrm{agg}}" + }, + { + "bbox": [ + 55, + 488, + 296, + 620 + ], + "type": "text", + "content": ". 
Similarly to [2], we extend the referential loss " + }, + { + "bbox": [ + 55, + 488, + 296, + 620 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{Ref}" + }, + { + "bbox": [ + 55, + 488, + 296, + 620 + ], + "type": "text", + "content": " to localize both the target and the anchors, which we term as parallel referential loss " + }, + { + "bbox": [ + 55, + 488, + 296, + 620 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{ref}}^P" + }, + { + "bbox": [ + 55, + 488, + 296, + 620 + ], + "type": "text", + "content": ", where both the target and anchors are localized simultaneously. For details of these losses, please refer to supplementary materials. The total loss function is defined as:" + } + ] + } + ], + "index": 51 + }, + { + "bbox": [ + 80, + 628, + 295, + 643 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 628, + 295, + 643 + ], + "spans": [ + { + "bbox": [ + 80, + 628, + 295, + 643 + ], + "type": "interline_equation", + "content": "\\mathcal {L} = \\lambda_ {O b j} \\mathcal {L} _ {O b j e c t} + \\lambda_ {R e f} \\mathcal {L} _ {R e f} ^ {P} + \\lambda_ {S e n t} \\mathcal {L} _ {S e n t}. \\tag {7}", + "image_path": "6ec257683d654957962e4475a6666377b352491adebfcd94eec0ff49e023a147.jpg" + } + ] + } + ], + "index": 52 + }, + { + "bbox": [ + 55, + 652, + 137, + 666 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 652, + 137, + 666 + ], + "spans": [ + { + "bbox": [ + 55, + 652, + 137, + 666 + ], + "type": "text", + "content": "4. Experiments" + } + ] + } + ], + "index": 53 + }, + { + "bbox": [ + 55, + 672, + 173, + 685 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 672, + 173, + 685 + ], + "spans": [ + { + "bbox": [ + 55, + 672, + 173, + 685 + ], + "type": "text", + "content": "4.1. 
Experiment Settings" + } + ] + } + ], + "index": 54 + }, + { + "bbox": [ + 55, + 689, + 296, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 689, + 296, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 689, + 296, + 713 + ], + "type": "text", + "content": "Datasets. Nr3D [1] contains 45,503 human utterances referencing 707 indoor scenes from ScanNet [10], cover" + } + ] + } + ], + "index": 55 + }, + { + "bbox": [ + 313, + 368, + 554, + 450 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 368, + 554, + 450 + ], + "spans": [ + { + "bbox": [ + 313, + 368, + 554, + 450 + ], + "type": "text", + "content": "ing 76 object categories with multiple same-class distractors. Sr3D [1] includes 83,572 template-based sentences in a \"target-spatial relation-anchor\" format, offering a simpler setup with similar distractors. ScanRefer [6] provides 51,583 free-form descriptions for 11,046 objects across 800 ScanNet scenes, incorporating spatial and attribute-level references to support 3DVG." + } + ] + } + ], + "index": 56 + }, + { + "bbox": [ + 313, + 451, + 554, + 511 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 451, + 554, + 511 + ], + "spans": [ + { + "bbox": [ + 313, + 451, + 554, + 511 + ], + "type": "text", + "content": "Evaluation Metrics. For " + }, + { + "bbox": [ + 313, + 451, + 554, + 511 + ], + "type": "inline_equation", + "content": "\\mathrm{Nr3D}" + }, + { + "bbox": [ + 313, + 451, + 554, + 511 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 451, + 554, + 511 + ], + "type": "inline_equation", + "content": "\\mathrm{Sr3D}" + }, + { + "bbox": [ + 313, + 451, + 554, + 511 + ], + "type": "text", + "content": ", grounding accuracy is measured by the percentage of correctly matched boxes [18, 36]. For ScanRefer, we report Acc@0.25 and Acc@0.5, i.e., the percentage of predicted boxes with IoU exceeding 0.25 or 0.5, respectively [42]." 
+ } + ] + } + ], + "index": 57 + }, + { + "bbox": [ + 313, + 511, + 554, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 511, + 554, + 594 + ], + "spans": [ + { + "bbox": [ + 313, + 511, + 554, + 594 + ], + "type": "text", + "content": "Implementation Details. All experiments are implemented in PyTorch and run on a single RTX 4090 GPU. We use AdamW [30] with a learning rate of 0.0005. The number of input views is set to " + }, + { + "bbox": [ + 313, + 511, + 554, + 594 + ], + "type": "inline_equation", + "content": "N = 4" + }, + { + "bbox": [ + 313, + 511, + 554, + 594 + ], + "type": "text", + "content": ". We set " + }, + { + "bbox": [ + 313, + 511, + 554, + 594 + ], + "type": "inline_equation", + "content": "\\lambda_{Ref}, \\lambda_{Obj}, \\lambda_{Sent} = 1.0, 0.5, 0.5" + }, + { + "bbox": [ + 313, + 511, + 554, + 594 + ], + "type": "text", + "content": ". For the SRD module, we adopt DeepSeekR1 [28], which balances performance and reproducibility, and can runs on a RTX 4090." + } + ] + } + ], + "index": 58 + }, + { + "bbox": [ + 313, + 600, + 473, + 613 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 600, + 473, + 613 + ], + "spans": [ + { + "bbox": [ + 313, + 600, + 473, + 613 + ], + "type": "text", + "content": "4.2.3D Visual Grounding Results" + } + ] + } + ], + "index": 59 + }, + { + "bbox": [ + 313, + 617, + 555, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 617, + 555, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 617, + 555, + 713 + ], + "type": "text", + "content": "We compare ViewSRD with recent state-of-the-art approaches to evaluate its effectiveness on 3DVG. Fig. 
4 illustrates complex query cases from " + }, + { + "bbox": [ + 313, + 617, + 555, + 713 + ], + "type": "inline_equation", + "content": "\\mathrm{Nr3D}" + }, + { + "bbox": [ + 313, + 617, + 555, + 713 + ], + "type": "text", + "content": " [1], including ground truth boxes, predictions from MVT [18], CoT3DRef [2], and ViewSRD, along with the original queries and the simplified sentences produced by the SRD module. In multi-anchor scenarios (e.g., involving \"bed\", \"table\", and \"chair\"), MVT and CoT3DRef often misalign predictions" + } + ] + } + ], + "index": 60 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 314, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 314, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 314, + 757 + ], + "type": "text", + "content": "9731" + } + ] + } + ], + "index": 61 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 59, + 82, + 553, + 216 + ], + "blocks": [ + { + "bbox": [ + 186, + 70, + 424, + 81 + ], + "lines": [ + { + "bbox": [ + 186, + 70, + 424, + 81 + ], + "spans": [ + { + "bbox": [ + 186, + 70, + 424, + 81 + ], + "type": "text", + "content": "Table 1. Performance " + }, + { + "bbox": [ + 186, + 70, + 424, + 81 + ], + "type": "inline_equation", + "content": "\\left( \\% \\right)" + }, + { + "bbox": [ + 186, + 70, + 424, + 81 + ], + "type": "text", + "content": " comparison on " + }, + { + "bbox": [ + 186, + 70, + 424, + 81 + ], + "type": "inline_equation", + "content": "\\mathrm{{Nr3D}}\\left\\lbrack 1\\right\\rbrack" + }, + { + "bbox": [ + 186, + 70, + 424, + 81 + ], + "type": "text", + "content": " and Sr3D [1]." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 59, + 82, + 553, + 216 + ], + "lines": [ + { + "bbox": [ + 59, + 82, + 553, + 216 + ], + "spans": [ + { + "bbox": [ + 59, + 82, + 553, + 216 + ], + "type": "table", + "html": "
MethodNr3DSr3D
OverallEasyHardView Dep.View Indep.OverallEasyHardView Dep.View Indep.
3DVG-Transformer [54]40.848.534.834.843.751.454.244.944.651.7
LanguageRefer [36]43.951.036.641.745.056.058.949.349.256.3
TransRefer3D [16]42.148.536.036.544.957.460.550.249.957.7
SAT [48]49.256.342.446.950.457.961.250.049.258.3
MVT [18]55.161.349.154.355.464.566.958.858.464.7
ViewRefer [15]56.063.049.755.156.867.068.962.152.267.7
MiKASA [5]64.469.759.465.464.075.278.667.370.475.4
CoT3DRef [2]64.470.059.261.965.773.275.267.967.673.5
ViewSRD (ours)69.975.364.868.670.676.078.370.669.076.2
", + "image_path": "d4ac83f9f8c8d4106453b587f62181ce0fec52985330f88c7ae1ed5b4eec8e45.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 54, + 226, + 294, + 321 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 226, + 294, + 321 + ], + "spans": [ + { + "bbox": [ + 54, + 226, + 294, + 321 + ], + "type": "text", + "content": "due to challenges in spatial reasoning. In contrast, ViewSRD correctly grounds targets by decomposing complex queries and leveraging robust spatial relationships between target-anchor pairs. Moreover, under viewpoint shifts, CoT3DRef struggles to maintain alignment, whereas ViewSRD reliably grounds targets by capturing spatial relations invariant to viewpoint changes (e.g., \"The trash cans are under the sink\")." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 323, + 295, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 323, + 295, + 479 + ], + "spans": [ + { + "bbox": [ + 55, + 323, + 295, + 479 + ], + "type": "text", + "content": "Quantitative results on " + }, + { + "bbox": [ + 55, + 323, + 295, + 479 + ], + "type": "inline_equation", + "content": "\\mathrm{Nr3D}" + }, + { + "bbox": [ + 55, + 323, + 295, + 479 + ], + "type": "text", + "content": " (Table 1) show that ViewSRD achieves a " + }, + { + "bbox": [ + 55, + 323, + 295, + 479 + ], + "type": "inline_equation", + "content": "5.2\\%" + }, + { + "bbox": [ + 55, + 323, + 295, + 479 + ], + "type": "text", + "content": " accuracy gain over the best prior method, CoT3DRef, under identical settings. 
Under view-dependent evaluation, it further outperforms CoT3DRef by " + }, + { + "bbox": [ + 55, + 323, + 295, + 479 + ], + "type": "inline_equation", + "content": "6.7\\%" + }, + { + "bbox": [ + 55, + 323, + 295, + 479 + ], + "type": "text", + "content": ", demonstrating the effectiveness of CCVTs in aligning textual and visual spaces and modeling viewpoint-sensitive relations through query decomposition. To assess generalization, we also evaluate on Sr3D [1] (Table 1). ViewSRD achieves the highest accuracy of " + }, + { + "bbox": [ + 55, + 323, + 295, + 479 + ], + "type": "inline_equation", + "content": "76.2\\%" + }, + { + "bbox": [ + 55, + 323, + 295, + 479 + ], + "type": "text", + "content": " in the View-Independent setting, with additional gains of " + }, + { + "bbox": [ + 55, + 323, + 295, + 479 + ], + "type": "inline_equation", + "content": "+2.8\\%" + }, + { + "bbox": [ + 55, + 323, + 295, + 479 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 55, + 323, + 295, + 479 + ], + "type": "inline_equation", + "content": "+2.7\\%" + }, + { + "bbox": [ + 55, + 323, + 295, + 479 + ], + "type": "text", + "content": " in the View-Independence and Hard scenarios, respectively. These results confirm the robustness and generalizability of our approach across diverse scenario." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 491, + 173, + 504 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 491, + 173, + 504 + ], + "spans": [ + { + "bbox": [ + 55, + 491, + 173, + 504 + ], + "type": "text", + "content": "4.3. Analysis of Anchors" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 510, + 295, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 510, + 295, + 714 + ], + "spans": [ + { + "bbox": [ + 56, + 510, + 295, + 714 + ], + "type": "text", + "content": "In this section, we analyze the impact of the number of anchors in a query on 3DVG performance. 
The results presented in Table 2 underscore the effectiveness of our approach, particularly in multi-anchor scenarios, where our method successfully disentangles spatial relationships by explicitly modeling target-anchor interactions. In contrast, existing methods such as MVT [18] and CoT3DRef [2], which do not account for the necessity of spatial relationship decoupling, exhibit a notable performance decline in multi-anchor queries compared to single-anchor cases. Notably, our approach achieves higher accuracy in multi-anchor queries than in single-anchor ones, demonstrating that when properly processed, multi-anchor information enhances 3DVG performance rather than introducing ambiguity. These findings validate the efficacy of ViewSRD in effectively leveraging complex spatial relationships for improved grounding accuracy." + } + ] + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 331, + 247, + 538, + 304 + ], + "blocks": [ + { + "bbox": [ + 313, + 224, + 553, + 246 + ], + "lines": [ + { + "bbox": [ + 313, + 224, + 553, + 246 + ], + "spans": [ + { + "bbox": [ + 313, + 224, + 553, + 246 + ], + "type": "text", + "content": "Table 2. Performance (%) comparison on Nr3D [1] with new criterions Multi-Anc and Single-Anc." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 331, + 247, + 538, + 304 + ], + "lines": [ + { + "bbox": [ + 331, + 247, + 538, + 304 + ], + "spans": [ + { + "bbox": [ + 331, + 247, + 538, + 304 + ], + "type": "table", + "html": "
ModelMulti-AncSingle-AncOverall
MVT [18]52.656.655.1
CoT3DRef [2]63.165.264.4
ViewSRD71.569.569.9
", + "image_path": "9f14dff704c41e0dc548d6e00b096b16837c919ff8cc1b982b3ecd803d567858.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 316, + 353, + 553, + 469 + ], + "blocks": [ + { + "bbox": [ + 313, + 331, + 553, + 352 + ], + "lines": [ + { + "bbox": [ + 313, + 331, + 553, + 352 + ], + "spans": [ + { + "bbox": [ + 313, + 331, + 553, + 352 + ], + "type": "text", + "content": "Table 3. Performance " + }, + { + "bbox": [ + 313, + 331, + 553, + 352 + ], + "type": "inline_equation", + "content": "(\\%)" + }, + { + "bbox": [ + 313, + 331, + 553, + 352 + ], + "type": "text", + "content": " of SRD module improves MVT [18], BUTD-DETR [21] and EDA [42] on ScanRefer [6] dataset." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 316, + 353, + 553, + 469 + ], + "lines": [ + { + "bbox": [ + 316, + 353, + 553, + 469 + ], + "spans": [ + { + "bbox": [ + 316, + 353, + 553, + 469 + ], + "type": "table", + "html": "
MethodUnique (19%)Multiple (81%)Overall
0.250.50.250.50.250.5
MVT [18]77.766.531.925.340.833.3
MVT+SRD78.667.234.127.142.1 (3.2%↑)34.3(3.0%↑)
BUTD-DETR [21]82.864.944.733.950.438.6
BUTD-DETR+SRD85.066.245.334.257.9 (14.9%↑)45.7 (18.4%↑)
EDA [42]80.465.335.625.143.632.3
EDA+SRD81.067.336.428.344.4 (1.8%↑)35.3 (9.3%↑)
ViewSRD82.168.237.429.045.436.0
", + "image_path": "ab5c0c0e289b5bf25ce81761d2dc400572c6ff2e050968ee799a1f9b711d7f5c.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 495, + 515, + 508 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 495, + 515, + 508 + ], + "spans": [ + { + "bbox": [ + 313, + 495, + 515, + 508 + ], + "type": "text", + "content": "4.4. SRD Enhances Other 3DVG Methods." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 312, + 521, + 555, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 521, + 555, + 714 + ], + "spans": [ + { + "bbox": [ + 312, + 521, + 555, + 714 + ], + "type": "text", + "content": "Our SRD module is inherently model-agnostic, operating independently of the training process by focusing exclusively on decoupling complex multi-anchor queries into simpler single-anchor queries. This decoupling mechanism reduces ambiguity in multi-anchor descriptions, enhances target grounding, and serves as a model-independent preprocessing step, ensuring seamless compatibility with various 3DVG methods to improve performance without modifying existing architectures. As demonstrated in Table 3, integrating SRD into MVT [18], BUTD-DETR [21] and EDA [42] consistently leads to performance improvements. These results highlight SRD's ability to refine query interpretation by effectively disentangling target-anchor relationships, thereby reducing errors introduced by complex linguistic structures. These improvements reinforce the critical role of SRD module in enhancing accuracy of 3DVG." 
+ } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "text", + "content": "9732" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 56, + 92, + 298, + 193 + ], + "blocks": [ + { + "bbox": [ + 55, + 70, + 295, + 92 + ], + "lines": [ + { + "bbox": [ + 55, + 70, + 295, + 92 + ], + "spans": [ + { + "bbox": [ + 55, + 70, + 295, + 92 + ], + "type": "text", + "content": "Table 4. Ablation studies on Nr3D [1]. All components contribute to final performance(%)" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 56, + 92, + 298, + 193 + ], + "lines": [ + { + "bbox": [ + 56, + 92, + 298, + 193 + ], + "spans": [ + { + "bbox": [ + 56, + 92, + 298, + 193 + ], + "type": "table", + "html": "
ComponentOverallEasyHardView Dep.View Indep.
w/o CCVTs.62.268.556.160.163.2
w/o Textual M.68.073.562.667.668.1
w/o Scene M.64.670.558.963.864.9
w/o SRD M.68.673.064.866.570.0
w/o Weight.69.074.264.066.570.2
LLM-Aug.69.174.563.768.069.5
ViewSRD69.975.364.868.670.6
", + "image_path": "82bd451bff67e5bcd23e3d4c7636269a4c3f4527f05f1641f4f3a731b8dcc2c8.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 58, + 205, + 293, + 315 + ], + "blocks": [ + { + "bbox": [ + 88, + 194, + 263, + 205 + ], + "lines": [ + { + "bbox": [ + 88, + 194, + 263, + 205 + ], + "spans": [ + { + "bbox": [ + 88, + 194, + 263, + 205 + ], + "type": "text", + "content": "Table 5. Ablation of view numbers on Nr3D [1]." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 58, + 205, + 293, + 315 + ], + "lines": [ + { + "bbox": [ + 58, + 205, + 293, + 315 + ], + "spans": [ + { + "bbox": [ + 58, + 205, + 293, + 315 + ], + "type": "table", + "html": "
View NumberOverallEasyHardView Dep.View Indep.
TrainTest
4166.071.760.564.067.0
4268.975.163.066.969.9
4469.975.364.868.670.6
1164.470.958.160.866.2
2267.773.062.566.168.4
8868.474.163.067.468.9
", + "image_path": "6ccde13005cf71e609a0cf599906bd7f807c7fba157b6e0e02aee1ea0465ffe7.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 325, + 149, + 338 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 325, + 149, + 338 + ], + "spans": [ + { + "bbox": [ + 55, + 325, + 149, + 338 + ], + "type": "text", + "content": "4.5. Ablation Study" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 342, + 295, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 342, + 295, + 628 + ], + "spans": [ + { + "bbox": [ + 55, + 342, + 295, + 628 + ], + "type": "text", + "content": "Analysis of ViewSRD Components. To assess the contribution of each component within ViewSRD, we conducted detailed ablation studies on the Nr3D dataset [1]. Starting from the full model, we systematically removed key modules one at a time to evaluate their individual impact. The results, presented in Table 4, demonstrate that each component plays a crucial role in enhancing model performance across different scenarios. Notably, the removal of the CCVTs leads to the most significant performance degradation. This is primarily because, without the view token, the model lacks explicit viewpoint information, impairing its ability to distinguish between different perspectives. Similarly, removing either the textual module or the scene module results in a noticeable decline, underscoring the necessity of cross-modal interaction. When view-alignment weighting is disabled (w/o Weight), performance drops by " + }, + { + "bbox": [ + 55, + 342, + 295, + 628 + ], + "type": "inline_equation", + "content": "0.9\\%" + }, + { + "bbox": [ + 55, + 342, + 295, + 628 + ], + "type": "text", + "content": ", showing that dynamic alignment of view features is critical for performance under view-dependent conditions. 
Removing the SRD module leads to performance degradation, confirming the benefit of multi-anchor query decoupling. We also compare it with an LLM-based augmentation method from Multi3DRefer [52] and find that SRD achieves greater gains, highlighting the advantage of structured query decomposition over generic augmentation." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 630, + 295, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 630, + 295, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 630, + 295, + 713 + ], + "type": "text", + "content": "Analysis of Multi-View Modeling. We evaluate the effect of varying view counts on 3DVG performance using the Nr3D dataset. As shown in Table 5, testing with more views consistently improves accuracy when the model is trained with four views, highlighting the benefit of aggregating complementary spatial cues from multiple perspectives. When training and testing with the same number of" + } + ] + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 366, + 93, + 502, + 162 + ], + "blocks": [ + { + "bbox": [ + 313, + 70, + 555, + 92 + ], + "lines": [ + { + "bbox": [ + 313, + 70, + 555, + 92 + ], + "spans": [ + { + "bbox": [ + 313, + 70, + 555, + 92 + ], + "type": "text", + "content": "Table 6. Accuracy comparison when replacing different LLMs in SRD module on Nr3D [1]." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 366, + 93, + 502, + 162 + ], + "lines": [ + { + "bbox": [ + 366, + 93, + 502, + 162 + ], + "spans": [ + { + "bbox": [ + 366, + 93, + 502, + 162 + ], + "type": "table", + "html": "
LLM decouplerAccuracy
OpenChat [40]69.6%
DeepSeek-R1 [28]69.9%
Qwen-Plus [46]70.5%
Qwen-Turbo [46]70.7%
", + "image_path": "ea5d2acf9fbbfec0826a369b9b0d23fee2d024f58eff528ace7264c2a8d6bde1.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 171, + 553, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 171, + 553, + 266 + ], + "spans": [ + { + "bbox": [ + 313, + 171, + 553, + 266 + ], + "type": "text", + "content": "views, performance improves from " + }, + { + "bbox": [ + 313, + 171, + 553, + 266 + ], + "type": "inline_equation", + "content": "64.4\\%" + }, + { + "bbox": [ + 313, + 171, + 553, + 266 + ], + "type": "text", + "content": " (1 view) to " + }, + { + "bbox": [ + 313, + 171, + 553, + 266 + ], + "type": "inline_equation", + "content": "67.7\\%" + }, + { + "bbox": [ + 313, + 171, + 553, + 266 + ], + "type": "text", + "content": " (2 views), but plateaus at " + }, + { + "bbox": [ + 313, + 171, + 553, + 266 + ], + "type": "inline_equation", + "content": "68.4\\%" + }, + { + "bbox": [ + 313, + 171, + 553, + 266 + ], + "type": "text", + "content": " with 8 views, suggesting diminishing returns. Notably, four views offer a strong tradeoff, capturing diverse spatial information with minimal redundancy and maintaining computational efficiency. This also suggests that uniformly attending to many views may dilute focus on key perspectives. Future work will explore adaptive view selection." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 267, + 555, + 471 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 267, + 555, + 471 + ], + "spans": [ + { + "bbox": [ + 313, + 267, + 555, + 471 + ], + "type": "text", + "content": "Analysis of SRD's LLM Decoupler. In this paper, we employ the open-source DeepSeek-R1 [28] as the LLM in the SRD module and further investigate the impact of different LLMs on the final performance of 3DVG. 
As shown in Table 6, different LLM decouplers exhibit varying levels of effectiveness in the sentence decoupling task. Models with stronger decoupling capabilities yield better results. For instance, OpenChat [40] and DeepSeek-R1 [28] achieve accuracies of " + }, + { + "bbox": [ + 313, + 267, + 555, + 471 + ], + "type": "inline_equation", + "content": "69.6\\%" + }, + { + "bbox": [ + 313, + 267, + 555, + 471 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 267, + 555, + 471 + ], + "type": "inline_equation", + "content": "69.9\\%" + }, + { + "bbox": [ + 313, + 267, + 555, + 471 + ], + "type": "text", + "content": ", respectively, while models designed with enhanced sentence decoupling capabilities, such as Qwen-Plus [46] and Qwen-Turbo [46], achieve " + }, + { + "bbox": [ + 313, + 267, + 555, + 471 + ], + "type": "inline_equation", + "content": "70.5\\%" + }, + { + "bbox": [ + 313, + 267, + 555, + 471 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 267, + 555, + 471 + ], + "type": "inline_equation", + "content": "70.7\\%" + }, + { + "bbox": [ + 313, + 267, + 555, + 471 + ], + "type": "text", + "content": ", with Qwen-Turbo demonstrating the highest performance. These results indicate that as an LLM's ability to disentangle complex sentence structures improves, it becomes more effective at isolating and extracting relevant information, ultimately leading to significant gains in 3DVG accuracy." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 314, + 481, + 388, + 494 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 481, + 388, + 494 + ], + "spans": [ + { + "bbox": [ + 314, + 481, + 388, + 494 + ], + "type": "text", + "content": "5. 
Conclusion" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 501, + 553, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 501, + 553, + 632 + ], + "spans": [ + { + "bbox": [ + 313, + 501, + 553, + 632 + ], + "type": "text", + "content": "In this paper, we introduce ViewSRD, a framework that disentangles target-anchor relationships via the Simple Relation Decoupling (SRD) module and enhances multi-view understanding through the Multi-view Textual-Scene Interaction (Multi-TSI) module. By decomposing complex multi-anchor queries into simpler single-anchor sentences, SRD clarifies positional relationships, while Multi-TSI integrates textual and scene features across viewpoints using cross-modal consistent view tokens (CCVTs) to capture spatial interactions. Extensive experiments demonstrate ViewSRD's state-of-the-art performance in 3DVG." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 633, + 553, + 705 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 633, + 553, + 705 + ], + "spans": [ + { + "bbox": [ + 313, + 633, + 553, + 705 + ], + "type": "text", + "content": "A limitation of ViewSRD is its assumption that complex queries can be fully decomposed without overlapping relationships. While the decomposition into overlapping relations does not degrade performance, it diminishes the intended benefits of simplification. Future work will explore adaptive query to better preserve contextual dependencies." 
+ } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "text", + "content": "9733" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 297, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 297, + 217 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 297, + 217 + ], + "type": "text", + "content": "Acknowledgements. This work is supported by Guangdong Provincial Natural Science Foundation for Outstanding Youth Team Project (No. 2024B1515040010), NSFC Key Project (No. U23A20391), China National Key R&D Program (Grant No. 2023YFE0202700, 2024YFB4709200), Key-Area Research and Development Program of Guangzhou City (No. 2023B01J0022), Guangdong Natural Science Funds for Distinguished Young Scholars (Grant 2023B1515020097), the National Research Foundation, Singapore under its AI Singapore Programme (AISG Award No.: AISG3-GV-2023-011), the Singapore Ministry of Education AcRF Tier 1 Grant (Grant No.: MSS25C004), and the Lee Kong Chian Fellowships." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 237, + 115, + 250 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 237, + 115, + 250 + ], + "spans": [ + { + "bbox": [ + 56, + 237, + 115, + 250 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 61, + 257, + 296, + 713 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 61, + 257, + 294, + 300 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 257, + 294, + 300 + ], + "spans": [ + { + "bbox": [ + 61, + 257, + 294, + 300 + ], + "type": "text", + "content": "[1] Panos Achlioptas, Ahmed Abdelreehm, Fei Xia, Mohamed Elhoseiny, and Leonidas Guibas. Referit3d: Neural listeners for fine-grained 3d object identification in real-world scenes. In ECCV, pages 422-440, 2020. 2, 6, 7, 8" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 61, + 302, + 296, + 356 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 302, + 296, + 356 + ], + "spans": [ + { + "bbox": [ + 61, + 302, + 296, + 356 + ], + "type": "text", + "content": "[2] Eslam Mohamed Bakr, Mohamed Ayman, Mahmoud Ahmed, Habib Slim, and Mohamed Elhoseiny. Cot3dref: Chain-of-thoughts data-efficient 3d visual grounding. The Twelfth International Conference on Learning Representations, 2024. 6, 7" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 61, + 357, + 296, + 401 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 357, + 296, + 401 + ], + "spans": [ + { + "bbox": [ + 61, + 357, + 296, + 401 + ], + "type": "text", + "content": "[3] Christopher Beckham, Martin Weiss, Florian Golemo, Sina Honari, Derek Nowrouzehrai, and Christopher Pal. Visual question answering from another perspective: Clevr mental rotation tests. Pattern Recognition, 136:109209, 2023. 
5" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 61, + 402, + 296, + 468 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 402, + 296, + 468 + ], + "spans": [ + { + "bbox": [ + 61, + 402, + 296, + 468 + ], + "type": "text", + "content": "[4] Francisco Maria Calisto, João Fernandes, Margarida Morais, Carlos Santiago, João Maria Abrantes, Nuno Nunes, and Jacinto C Nascimento. Assertiveness-based agent communication for a personalized medicine on medical imaging diagnosis. In Proceedings of the 2023 CHI conference on human factors in computing systems, pages 1-20, 2023. 1" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 62, + 468, + 296, + 523 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 468, + 296, + 523 + ], + "spans": [ + { + "bbox": [ + 62, + 468, + 296, + 523 + ], + "type": "text", + "content": "[5] Chun-Peng Chang, Shaoxiang Wang, Alain Pagani, and Didier Stricker. Mikasa: Multi-key-anchor & scene-aware transformer for 3d visual grounding. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14131-14140, 2024. 1, 3, 7" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 62, + 524, + 294, + 557 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 524, + 294, + 557 + ], + "spans": [ + { + "bbox": [ + 62, + 524, + 294, + 557 + ], + "type": "text", + "content": "[6] Dave Zhenyu Chen, Angel X Chang, and Matthias Nießner. Scanrefer: 3d object localization in rgb-d scans using natural language. In ECCV, pages 202-221. Springer, 2020. 2, 6, 7" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 62, + 558, + 294, + 612 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 558, + 294, + 612 + ], + "spans": [ + { + "bbox": [ + 62, + 558, + 294, + 612 + ], + "type": "text", + "content": "[7] Weihong Chen, Xuemiao Xu, Haoxin Yang, Yi Xie, Peng Xiao, Cheng Xu, Huaidong Zhang, and Pheng-Ann Heng. 
Scjd: Sparse correlation and joint distillation for efficient 3d human pose estimation. arXiv preprint arXiv:2503.14097, 2025. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 62, + 613, + 294, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 613, + 294, + 689 + ], + "spans": [ + { + "bbox": [ + 62, + 613, + 294, + 689 + ], + "type": "text", + "content": "[8] Tushar Choudhary, Vikrant Dewangan, Shivam Chandhok, Shubham Priyadarshan, Anushka Jain, Arun K Singh, Siddharth Srivastava, Krishna Murthy Jatavallabhula, and K Madhava Krishna. Talk2bev: Language-enhanced bird's-eye view maps for autonomous driving. In 2024 IEEE International Conference on Robotics and Automation (ICRA), pages 16345-16352. IEEE, 2024. 1" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 62, + 691, + 294, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 691, + 294, + 713 + ], + "spans": [ + { + "bbox": [ + 62, + 691, + 294, + 713 + ], + "type": "text", + "content": "[9] Can Cui, Yunsheng Ma, Xu Cao, Wenqian Ye, and Ziran Wang. Drive as you speak: Enabling human-like interac" + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 73, + 553, + 713 + ], + "type": "list", + "angle": 0, + "index": 27, + "blocks": [ + { + "bbox": [ + 333, + 73, + 553, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 333, + 73, + 553, + 106 + ], + "spans": [ + { + "bbox": [ + 333, + 73, + 553, + 106 + ], + "type": "text", + "content": "tion with large language models in autonomous vehicles. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 902-909, 2024. 
1" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 316, + 106, + 553, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 106, + 553, + 150 + ], + "spans": [ + { + "bbox": [ + 316, + 106, + 553, + 150 + ], + "type": "text", + "content": "[10] Angela Dai, Angel X Chang, Manolis Savva, Maciej Halber, Thomas Funkhouser, and Matthias Nießner. Scannet: Richly-annotated 3d reconstructions of indoor scenes. In CVPR, pages 5828-5839, 2017. 2, 6" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 316, + 151, + 553, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 151, + 553, + 194 + ], + "spans": [ + { + "bbox": [ + 316, + 151, + 553, + 194 + ], + "type": "text", + "content": "[11] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805, 2018. 4" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 316, + 194, + 553, + 237 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 194, + 553, + 237 + ], + "spans": [ + { + "bbox": [ + 316, + 194, + 553, + 237 + ], + "type": "text", + "content": "[12] Azade Farshad, Yousef Yeganeh, Yu Chi, Chengzhi Shen, Böjrn Ommer, and Nassir Navab. Scenegenie: Scene graph guided diffusion models for image synthesis. In ICCV, pages 88-98, 2023. 2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 316, + 238, + 553, + 282 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 238, + 553, + 282 + ], + "spans": [ + { + "bbox": [ + 316, + 238, + 553, + 282 + ], + "type": "text", + "content": "[13] Shuo Feng, Haowei Sun, Xintao Yan, Haojie Zhu, Zhengxia Zou, Shengyin Shen, and Henry X Liu. Dense reinforcement learning for safety validation of autonomous vehicles. Nature, 615(7953):620-627, 2023. 
1" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 316, + 283, + 553, + 315 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 283, + 553, + 315 + ], + "spans": [ + { + "bbox": [ + 316, + 283, + 553, + 315 + ], + "type": "text", + "content": "[14] Liang Geng and Jianqin Yin. Viewinfer3d: 3d visual grounding based on embodied viewpoint inference. IEEE Robotics and Automation Letters, 2024. 1" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 316, + 316, + 553, + 360 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 316, + 553, + 360 + ], + "spans": [ + { + "bbox": [ + 316, + 316, + 553, + 360 + ], + "type": "text", + "content": "[15] Zoey Guo, Yiwen Tang, Ray Zhang, Dong Wang, Zhigang Wang, Bin Zhao, and Xuelong Li. Viewrefer: Grasp the multi-view knowledge for 3d visual grounding. In ICCV, pages 15372-15383, 2023. 1, 3, 7" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 316, + 361, + 553, + 405 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 361, + 553, + 405 + ], + "spans": [ + { + "bbox": [ + 316, + 361, + 553, + 405 + ], + "type": "text", + "content": "[16] Dailan He, Yusheng Zhao, Junyu Luo, Tianrui Hui, Shaofei Huang, Aixi Zhang, and Si Liu. Transrefer3d: Entity-andrelation aware transformer for fine-grained 3d visual grounding. In ACM MM, pages 2344-2352, 2021. 7" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 316, + 405, + 553, + 459 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 405, + 553, + 459 + ], + "spans": [ + { + "bbox": [ + 316, + 405, + 553, + 459 + ], + "type": "text", + "content": "[17] Yining Hong, Haoyu Zhen, Peihao Chen, Shuhong Zheng, Yilun Du, Zhenfang Chen, and Chuang Gan. 3d-llm: Injecting the 3d world into large language models. Advances in Neural Information Processing Systems, 36:20482-20494, 2023. 
1, 3" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 316, + 459, + 553, + 492 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 459, + 553, + 492 + ], + "spans": [ + { + "bbox": [ + 316, + 459, + 553, + 492 + ], + "type": "text", + "content": "[18] Shijia Huang, Yilun Chen, Jiaya Jia, and Liwei Wang. Multiview transformer for 3d visual grounding. In CVPR, pages 15524-15533, 2022. 2, 3, 6, 7" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 316, + 492, + 553, + 548 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 492, + 553, + 548 + ], + "spans": [ + { + "bbox": [ + 316, + 492, + 553, + 548 + ], + "type": "text", + "content": "[19] Yufeng Huang, Jiji Tang, Zhuo Chen, Rongsheng Zhang, Xinfeng Zhang, Weijie Chen, Zeng Zhao, Zhou Zhao, Tangjie Lv, Zhipeng Hu, et al. Structure-clip: Towards scene graph knowledge to enhance multi-modal structured representations. In AAAI, pages 2417–2425, 2024. 1, 2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 316, + 548, + 553, + 581 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 548, + 553, + 581 + ], + "spans": [ + { + "bbox": [ + 316, + 548, + 553, + 581 + ], + "type": "text", + "content": "[20] Zixin Huang, Xuesong Tao, and Xinyuan Liu. Nan-detr: noising multi-anchor makes detr better for object detection. Frontiers in Neurorobotics, 18:1484088, 2024. 1" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 316, + 581, + 553, + 635 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 581, + 553, + 635 + ], + "spans": [ + { + "bbox": [ + 316, + 581, + 553, + 635 + ], + "type": "text", + "content": "[21] Ayush Jain, Nikolaos Gkanatsios, Ishita Mediratta, and Katerina Fragkiadaki. Bottom up top down detection transformers for language grounding in images and point clouds. In European Conference on Computer Vision, pages 417-433. Springer, 2022. 
7" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 316, + 636, + 553, + 691 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 636, + 553, + 691 + ], + "spans": [ + { + "bbox": [ + 316, + 636, + 553, + 691 + ], + "type": "text", + "content": "[22] Xinjie Jiang, Chenxi Zheng, Xuemiao Xu, Bangzhen Liu, Weiying Zheng, Huaidong Zhang, and Shengfeng He. Vrdone: One-stage video visual relation detection. In Proceedings of the 32nd ACM International Conference on Multimedia, pages 1437-1446, 2024. 2" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 316, + 691, + 553, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 691, + 553, + 713 + ], + "spans": [ + { + "bbox": [ + 316, + 691, + 553, + 713 + ], + "type": "text", + "content": "[23] Jingdan Kang, Haoxin Yang, Yan Cai, Huaidong Zhang, Xuemiao Xu, Yong Du, and Shengfeng He. Sita: Structurally" + } + ] + } + ], + "index": 26 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "type": "text", + "content": "9734" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 72, + 295, + 713 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 76, + 72, + 294, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 72, + 294, + 106 + ], + "spans": [ + { + "bbox": [ + 76, + 72, + 294, + 106 + ], + "type": "text", + "content": "imperceptible and transferable adversarial attacks for stylized image generation. IEEE Transactions on Information Forensics and Security, 2025. 
2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 106, + 295, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 106, + 295, + 139 + ], + "spans": [ + { + "bbox": [ + 56, + 106, + 295, + 139 + ], + "type": "text", + "content": "[24] Sanjoy Kundu and Sathyanarayanan N Aakur. Is-ggt: Iterative scene graph generation with generative transformers. In CVPR, pages 6292-6301, 2023. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 140, + 294, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 140, + 294, + 183 + ], + "spans": [ + { + "bbox": [ + 56, + 140, + 294, + 183 + ], + "type": "text", + "content": "[25] Jialu Li and Mohit Bansal. Panogen: Text-conditioned panoramic environment generation for vision-and-language navigation. Advances in Neural Information Processing Systems, 36, 2024. 1" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 184, + 295, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 184, + 295, + 239 + ], + "spans": [ + { + "bbox": [ + 56, + 184, + 295, + 239 + ], + "type": "text", + "content": "[26] Yihong Lin, Xuemiao Xu, Huaidong Zhang, Cheng Xu, Weijie Li, Yi Xie, Jing Qin, and Shengfeng He. Delving into invisible semantics for generalized one-shot neural human rendering. IEEE Transactions on Visualization and Computer Graphics, 2025. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 239, + 295, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 239, + 295, + 293 + ], + "spans": [ + { + "bbox": [ + 56, + 239, + 295, + 293 + ], + "type": "text", + "content": "[27] Xiongkun Linghu, Jiangyong Huang, Xuesong Niu, Xiaojian Shawn Ma, Baoxiong Jia, and Siyuan Huang. Multimodal situated reasoning in 3d scenes. Advances in Neural Information Processing Systems, 37:140903-140936, 2025. 
2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 293, + 295, + 338 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 293, + 295, + 338 + ], + "spans": [ + { + "bbox": [ + 56, + 293, + 295, + 338 + ], + "type": "text", + "content": "[28] Aixin Liu, Bei Feng, Bing Xue, Bingxuan Wang, Bochao Wu, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, et al. Deepseek-v3 technical report. arXiv preprint arXiv:2412.19437, 2024. 2, 6, 8" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 338, + 295, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 338, + 295, + 392 + ], + "spans": [ + { + "bbox": [ + 56, + 338, + 295, + 392 + ], + "type": "text", + "content": "[29] Bangzhen Liu, Chenxi Zheng, Xuemiao Xu, Cheng Xu, Huaidong Zhang, and Shengfeng He. Rotation-adaptive point cloud domain generalization via intricate orientation learning. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2025. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 393, + 295, + 415 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 393, + 295, + 415 + ], + "spans": [ + { + "bbox": [ + 56, + 393, + 295, + 415 + ], + "type": "text", + "content": "[30] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101, 2017. 6" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 416, + 295, + 480 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 416, + 295, + 480 + ], + "spans": [ + { + "bbox": [ + 56, + 416, + 295, + 480 + ], + "type": "text", + "content": "[31] Ruiyuan Lyu, Jingli Lin, Tai Wang, Xiaohan Mao, Yilun Chen, Runsen Xu, Haifeng Huang, Chenming Zhu, Dahua Lin, and Jiangmiao Pang. Mmscan: A multi-modal 3d scene dataset with hierarchical grounded language annotations. Advances in Neural Information Processing Systems, 37:50898-50924, 2025. 
1" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 481, + 295, + 525 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 481, + 295, + 525 + ], + "spans": [ + { + "bbox": [ + 56, + 481, + 295, + 525 + ], + "type": "text", + "content": "[32] Yunze Man, Liang-Yan Gui, and Yu-Xiong Wang. Situational awareness matters in 3d vision language reasoning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13678-13688, 2024. 1" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 526, + 295, + 580 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 526, + 295, + 580 + ], + "spans": [ + { + "bbox": [ + 56, + 526, + 295, + 580 + ], + "type": "text", + "content": "[33] Honghan Pan, Bangzhen Liu, Xuemiao Xu, Chenxi Zheng, Yongwei Nie, and Shengfeng He. Gaussian prompter: Linking 2d prompts for 3d gaussian segmentation. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2025. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 581, + 295, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 581, + 295, + 624 + ], + "spans": [ + { + "bbox": [ + 56, + 581, + 295, + 624 + ], + "type": "text", + "content": "[34] Itthisak Phueaksri, Marc A Kastner, Yasutomo Kawanishi, Takahiro Komamizu, and Ichiro Ide. An approach to generate a caption for an image collection using scene graph generation. IEEE Access, 2023. 2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 56, + 625, + 295, + 669 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 625, + 295, + 669 + ], + "spans": [ + { + "bbox": [ + 56, + 625, + 295, + 669 + ], + "type": "text", + "content": "[35] Charles Ruizhongtai Qi, Li Yi, Hao Su, and Leonidas J Guibas. Pointnet++: Deep hierarchical feature learning on point sets in a metric space. Advances in neural information processing systems, 30, 2017. 
5" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 56, + 670, + 295, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 670, + 295, + 713 + ], + "spans": [ + { + "bbox": [ + 56, + 670, + 295, + 713 + ], + "type": "text", + "content": "[36] Junha Roh, Karthik Desingh, Ali Farhadi, and Dieter Fox. Languagerefer: Spatial-language model for 3d visual grounding. In Conference on Robot Learning, pages 1046-1056. PMLR, 2022. 6, 7" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 72, + 554, + 713 + ], + "type": "list", + "angle": 0, + "index": 29, + "blocks": [ + { + "bbox": [ + 316, + 72, + 554, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 72, + 554, + 117 + ], + "spans": [ + { + "bbox": [ + 316, + 72, + 554, + 117 + ], + "type": "text", + "content": "[37] Xiangxi Shi, Zhonghua Wu, and Stefan Lee. Aware visual grounding in 3d scenes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14056-14065, 2024. 1" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 316, + 118, + 553, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 118, + 553, + 150 + ], + "spans": [ + { + "bbox": [ + 316, + 118, + 553, + 150 + ], + "type": "text", + "content": "[38] Yashar Talebirad and Amirhossein Nadiri. Multi-agent collaboration: Harnessing the power of intelligent llm agents. arXiv preprint arXiv:2306.03314, 2023. 1" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 316, + 151, + 553, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 151, + 553, + 194 + ], + "spans": [ + { + "bbox": [ + 316, + 151, + 553, + 194 + ], + "type": "text", + "content": "[39] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. 
Advances in neural information processing systems, 30, 2017. 5" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 316, + 194, + 553, + 238 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 194, + 553, + 238 + ], + "spans": [ + { + "bbox": [ + 316, + 194, + 553, + 238 + ], + "type": "text", + "content": "[40] Guan Wang, Sijie Cheng, Xianyuan Zhan, Xiangang Li, Sen Song, and Yang Liu. Openchat: Advancing open-source language models with mixed-quality data. arXiv preprint arXiv:2309.11235, 2023. 2, 8" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 316, + 239, + 553, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 239, + 553, + 293 + ], + "spans": [ + { + "bbox": [ + 316, + 239, + 553, + 293 + ], + "type": "text", + "content": "[41] Yuan Wang, Yali Li, and Shengjin Wang. G^3-lq: Marrying hyperbolic alignment with explicit semantic-geometric modeling for 3d visual grounding. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13917–13926, 2024. 5" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 316, + 293, + 553, + 338 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 293, + 553, + 338 + ], + "spans": [ + { + "bbox": [ + 316, + 293, + 553, + 338 + ], + "type": "text", + "content": "[42] Yanmin Wu, Xinhua Cheng, Renrui Zhang, Zesen Cheng, and Jian Zhang. Eda: Explicit text-decoupling and dense alignment for 3d visual grounding. In CVPR, pages 19231-19242, 2023. 1, 2, 6, 7" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 316, + 338, + 553, + 381 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 338, + 553, + 381 + ], + "spans": [ + { + "bbox": [ + 316, + 338, + 553, + 381 + ], + "type": "text", + "content": "[43] Cheng Xu, Wei Qu, Xuemiao Xu, and Xueling Liu. Multiscale flow-based occluding effect and content separation for cartoon animations. 
IEEE Transactions on Visualization and Computer Graphics, 29(9):4001-4014, 2022. 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 316, + 383, + 553, + 436 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 383, + 553, + 436 + ], + "spans": [ + { + "bbox": [ + 316, + 383, + 553, + 436 + ], + "type": "text", + "content": "[44] Can Xu, Yuehui Han, Rui Xu, Le Hui, Jin Xie, and Jian Yang. Multi-attribute interactions matter for 3d visual grounding. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 17253-17262, 2024. 1, 2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 316, + 437, + 553, + 492 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 437, + 553, + 492 + ], + "spans": [ + { + "bbox": [ + 316, + 437, + 553, + 492 + ], + "type": "text", + "content": "[45] Yingjie Xu, Bangzhen Liu, Hao Tang, Bailin Deng, and Shengfeng He. Learning with unreliability: Fast few-shot voxel radiance fields with relative geometric consistency. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 20342-20351, 2024. 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 316, + 493, + 553, + 536 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 493, + 553, + 536 + ], + "spans": [ + { + "bbox": [ + 316, + 493, + 553, + 536 + ], + "type": "text", + "content": "[46] An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, et al. Qwen2.5 technical report. arXiv preprint arXiv:2412.15115, 2024. 
2, 8" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 316, + 537, + 553, + 591 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 537, + 553, + 591 + ], + "spans": [ + { + "bbox": [ + 316, + 537, + 553, + 591 + ], + "type": "text", + "content": "[47] Haoxin Yang, Xuemiao Xu, Cheng Xu, Huaidong Zhang, Jing Qin, Yi Wang, Pheng-Ann Heng, and Shengfeng He. G2face: High-fidelity reversible face anonymization via generative and geometric priors. IEEE Transactions on Information Forensics and Security, 2024. 2" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 316, + 592, + 553, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 592, + 553, + 624 + ], + "spans": [ + { + "bbox": [ + 316, + 592, + 553, + 624 + ], + "type": "text", + "content": "[48] Zhengyuan Yang, Songyang Zhang, Liwei Wang, and Jiebo Luo. Sat: 2d semantics assisted training for 3d visual grounding. In ICCV, pages 1856-1866, 2021. 7" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 316, + 625, + 553, + 667 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 625, + 553, + 667 + ], + "spans": [ + { + "bbox": [ + 316, + 625, + 553, + 667 + ], + "type": "text", + "content": "[49] Zhao Yang, Jiaxuan Liu, Yucheng Han, Xin Chen, Zebiao Huang, Bin Fu, and Gang Yu. Appagent: Multimodal agents as smartphone users. arXiv preprint arXiv:2312.13771, 2023. 1" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 316, + 669, + 553, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 669, + 553, + 713 + ], + "spans": [ + { + "bbox": [ + 316, + 669, + 553, + 713 + ], + "type": "text", + "content": "[50] Qifan Yu, Juncheng Li, Yu Wu, Siliang Tang, Wei Ji, and Yueting Zhuang. Visually-prompted language model for fine-grained scene graph generation in an open world. In ICCV, pages 21560-21571, 2023. 
2" + } + ] + } + ], + "index": 28 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "9735" + } + ] + } + ], + "index": 30 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 73, + 297, + 475 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 56, + 73, + 297, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 73, + 297, + 127 + ], + "spans": [ + { + "bbox": [ + 56, + 73, + 297, + 127 + ], + "type": "text", + "content": "[51] Zhihao Yuan, Jinke Ren, Chun-Mei Feng, Hengshuang Zhao, Shuguang Cui, and Zhen Li. Visual programming for zero-shot open-vocabulary 3d visual grounding. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 20623-20633, 2024. 1, 3" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 129, + 294, + 173 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 129, + 294, + 173 + ], + "spans": [ + { + "bbox": [ + 56, + 129, + 294, + 173 + ], + "type": "text", + "content": "[52] Yiming Zhang, ZeMing Gong, and Angel X Chang. Multi3drefer: Grounding text description to multiple 3d objects. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 15225-15236, 2023. 8" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 175, + 294, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 175, + 294, + 228 + ], + "spans": [ + { + "bbox": [ + 56, + 175, + 294, + 228 + ], + "type": "text", + "content": "[53] Yuqi Zhang, Han Luo, and Yinjie Lei. Towards clip-driven language-free 3d visual grounding via 2d-3d relational enhancement and consistency. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13063–13072, 2024. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 230, + 294, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 230, + 294, + 262 + ], + "spans": [ + { + "bbox": [ + 56, + 230, + 294, + 262 + ], + "type": "text", + "content": "[54] Lichen Zhao, Daigang Cai, Lu Sheng, and Dong Xu. 3dvg-transformer: Relation modeling for visual grounding on point clouds. In ICCV, pages 2928-2937, 2021. 7" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 264, + 294, + 317 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 264, + 294, + 317 + ], + "spans": [ + { + "bbox": [ + 56, + 264, + 294, + 317 + ], + "type": "text", + "content": "[55] Chenxi Zheng, Yihong Lin, Bangzhen Liu, Xuemiao Xu, Yongwei Nie, and Shengfeng He. Recdreamer: Consistent text-to-3d generation via uniform score distillation. In The Thirteenth International Conference on Learning Representations. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 319, + 294, + 373 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 319, + 294, + 373 + ], + "spans": [ + { + "bbox": [ + 56, + 319, + 294, + 373 + ], + "type": "text", + "content": "[56] Chenxi Zheng, Bangzhen Liu, Xuemiao Xu, Huaidong Zhang, and Shengfeng He. Learning an interpretable stylized subspace for 3d-aware animatable artforms. IEEE Transactions on Visualization and Computer Graphics, 31(2):1465-1477, 2024. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 375, + 294, + 418 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 375, + 294, + 418 + ], + "spans": [ + { + "bbox": [ + 56, + 375, + 294, + 418 + ], + "type": "text", + "content": "[57] Gengze Zhou, Yicong Hong, and Qi Wu. Navgpt: Explicit reasoning in vision-and-language navigation with large language models. 
In Proceedings of the AAAI Conference on Artificial Intelligence, pages 7641-7649, 2024. 1" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 421, + 294, + 475 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 421, + 294, + 475 + ], + "spans": [ + { + "bbox": [ + 56, + 421, + 294, + 475 + ], + "type": "text", + "content": "[58] Ziyu Zhu, Zhuofan Zhang, Xiaojian Ma, Xuesong Niu, Yixin Chen, Baoxiong Jia, Zhidong Deng, Siyuan Huang, and Qing Li. Unifying 3d vision-language understanding via promptable queries. In European Conference on Computer Vision, pages 188–206. Springer, 2024. 1" + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "text", + "content": "9736" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file