diff --git "a/2023/3D Concept Learning and Reasoning From Multi-View Images/layout.json" "b/2023/3D Concept Learning and Reasoning From Multi-View Images/layout.json" new file mode 100644--- /dev/null +++ "b/2023/3D Concept Learning and Reasoning From Multi-View Images/layout.json" @@ -0,0 +1,7345 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 104, + 103, + 489, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 103, + 489, + 121 + ], + "spans": [ + { + "bbox": [ + 104, + 103, + 489, + 121 + ], + "type": "text", + "content": "3D Concept Learning and Reasoning from Multi-View Images" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 158, + 142, + 434, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 158, + 142, + 434, + 213 + ], + "spans": [ + { + "bbox": [ + 158, + 142, + 434, + 213 + ], + "type": "text", + "content": "Yining Hong" + }, + { + "bbox": [ + 158, + 142, + 434, + 213 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 158, + 142, + 434, + 213 + ], + "type": "text", + "content": ", Chunru Lin" + }, + { + "bbox": [ + 158, + 142, + 434, + 213 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 158, + 142, + 434, + 213 + ], + "type": "text", + "content": ", Yilun Du" + }, + { + "bbox": [ + 158, + 142, + 434, + 213 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 158, + 142, + 434, + 213 + ], + "type": "text", + "content": ", Zhenfang Chen" + }, + { + "bbox": [ + 158, + 142, + 434, + 213 + ], + "type": "inline_equation", + "content": "^{5}" + }, + { + "bbox": [ + 158, + 142, + 434, + 213 + ], + "type": "text", + "content": ", Joshua B. Tenenbaum" + }, + { + "bbox": [ + 158, + 142, + 434, + 213 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 158, + 142, + 434, + 213 + ], + "type": "text", + "content": ", Chuang Gan" + }, + { + "bbox": [ + 158, + 142, + 434, + 213 + ], + "type": "inline_equation", + "content": "^{4,5}" + }, + { + "bbox": [ + 158, + 142, + 434, + 213 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 158, + 142, + 434, + 213 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 158, + 142, + 434, + 213 + ], + "type": "text", + "content": "UCLA, " + }, + { + "bbox": [ + 158, + 142, + 434, + 213 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 158, + 142, + 434, + 213 + ], + "type": "text", + "content": "Shanghai Jiaotong University, " + }, + { + "bbox": [ + 158, + 142, + 434, + 213 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 158, + 142, + 434, + 213 + ], + "type": "text", + "content": "MIT CSAIL, " + }, + { + "bbox": [ + 158, + 142, + 434, + 213 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 158, + 142, + 434, + 213 + ], + "type": "text", + "content": "UMass Amherst, " + }, + { + "bbox": [ + 158, + 142, + 434, + 213 + ], + "type": "inline_equation", + "content": "^{5}" + }, + { + "bbox": [ + 158, + 142, + 434, + 213 + ], + "type": "text", + "content": "MIT-IBM Watson AI Lab https://vis-www.cs.umass.edu/3d-clr/" + } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 50, + 242, + 179, + 425 + ], + "blocks": [ + { + "bbox": [ + 50, + 242, + 179, + 425 + ], + "lines": [ + { + "bbox": [ + 50, + 242, + 179, + 425 + ], + "spans": [ + { + "bbox": [ + 50, + 242, + 179, + 425 + ], + "type": "image", + "image_path": 
"0139c5dde162a2aa92dcfb3c8dcbb7922e85a282412b0fa7132181e9fb7d8996.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 50, + 430, + 168, + 464 + ], + "lines": [ + { + "bbox": [ + 50, + 430, + 168, + 464 + ], + "spans": [ + { + "bbox": [ + 50, + 430, + 168, + 464 + ], + "type": "text", + "content": "Concept: Q: Are there any televisions? A: Yes" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 170, + 428, + 293, + 476 + ], + "lines": [ + { + "bbox": [ + 170, + 428, + 293, + 476 + ], + "spans": [ + { + "bbox": [ + 170, + 428, + 293, + 476 + ], + "type": "text", + "content": "Counting: \nQ: How many chairs are close to the table in the room with plant on the cabinet? A: 6" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 170, + 488, + 279, + 510 + ], + "lines": [ + { + "bbox": [ + 170, + 488, + 279, + 510 + ], + "spans": [ + { + "bbox": [ + 170, + 488, + 279, + 510 + ], + "type": "text", + "content": "Q: How many rooms have sofas? A: 1" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 46, + 513, + 547, + 536 + ], + "lines": [ + { + "bbox": [ + 46, + 513, + 547, + 536 + ], + "spans": [ + { + "bbox": [ + 46, + 513, + 547, + 536 + ], + "type": "text", + "content": "Figure 1. An exemplar scene with multi-view images and question-answer pairs of our 3DMV-VQA dataset. 3DMV-VQA contains four question types: concept, counting, relation, comparison. Orange words denote semantic concepts; blue words denote the relations." + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 185, + 243, + 272, + 332 + ], + "blocks": [ + { + "bbox": [ + 185, + 243, + 272, + 332 + ], + "lines": [ + { + "bbox": [ + 185, + 243, + 272, + 332 + ], + "spans": [ + { + "bbox": [ + 185, + 243, + 272, + 332 + ], + "type": "image", + "image_path": "9c2499a72b5232613521f83970886d73892accd35822abbe76380eef8f2aa6d4.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 184, + 335, + 272, + 423 + ], + "blocks": [ + { + "bbox": [ + 184, + 335, + 272, + 423 + ], + "lines": [ + { + "bbox": [ + 184, + 335, + 272, + 423 + ], + "spans": [ + { + "bbox": [ + 184, + 335, + 272, + 423 + ], + "type": "image", + "image_path": "bcdc54a8ca47ecfc2e80d202e50dbd8f6ca69bcd75c2d68e299c3a97942513b0.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 276, + 243, + 363, + 332 + ], + "blocks": [ + { + "bbox": [ + 276, + 243, + 363, + 332 + ], + "lines": [ + { + "bbox": [ + 276, + 243, + 363, + 332 + ], + "spans": [ + { + "bbox": [ + 276, + 243, + 363, + 332 + ], + "type": "image", + "image_path": "adb9c46914582c93ffb458cac411cec963fc04c004c0d353d24ebdb993d37a5a.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 276, + 335, + 362, + 422 + ], + "blocks": [ + { + "bbox": [ + 276, + 335, + 362, + 422 + ], + "lines": [ + { + "bbox": [ + 276, + 335, + 362, + 422 + ], + "spans": [ + { + "bbox": [ + 276, + 335, + 362, + 422 + ], + "type": "image", + "image_path": "8b29d6f5ef01af8dd76de5ff59d2297cdec550b6c7ae842222df4b69bb5b62d8.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 295, + 429, + 416, + 476 + ], + 
"lines": [ + { + "bbox": [ + 295, + 429, + 416, + 476 + ], + "spans": [ + { + "bbox": [ + 295, + 429, + 416, + 476 + ], + "type": "text", + "content": "Relation: Q: Facing the computer from the curtain, is there a lamp on the right? A: Yes" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 295, + 489, + 408, + 510 + ], + "lines": [ + { + "bbox": [ + 295, + 489, + 408, + 510 + ], + "spans": [ + { + "bbox": [ + 295, + 489, + 408, + 510 + ], + "type": "text", + "content": "Q: What's on the cabinet in the smaller room? A: Plant" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 365, + 243, + 453, + 332 + ], + "blocks": [ + { + "bbox": [ + 365, + 243, + 453, + 332 + ], + "lines": [ + { + "bbox": [ + 365, + 243, + 453, + 332 + ], + "spans": [ + { + "bbox": [ + 365, + 243, + 453, + 332 + ], + "type": "image", + "image_path": "49645c25bde17b0db6d9b773fde5388ec829dbf8fc93453c2f9d34d1a84bd58e.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 365, + 335, + 452, + 422 + ], + "blocks": [ + { + "bbox": [ + 365, + 335, + 452, + 422 + ], + "lines": [ + { + "bbox": [ + 365, + 335, + 452, + 422 + ], + "spans": [ + { + "bbox": [ + 365, + 335, + 452, + 422 + ], + "type": "image", + "image_path": "533b12da53016d7d003d444faa1199643658affa31d497d3c6002146c1f27325.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 418, + 429, + 541, + 510 + ], + "lines": [ + { + "bbox": [ + 418, + 429, + 541, + 510 + ], + "spans": [ + { + "bbox": [ + 418, + 429, + 541, + 510 + ], + "type": "text", + "content": "Comparison: \nQ: Are there fewer pictures in the larger room than the other room? A: No \nQ: Is the computer closer to a printer or a lamp? \nA: Printer" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 456, + 244, + 543, + 332 + ], + "blocks": [ + { + "bbox": [ + 456, + 244, + 543, + 332 + ], + "lines": [ + { + "bbox": [ + 456, + 244, + 543, + 332 + ], + "spans": [ + { + "bbox": [ + 456, + 244, + 543, + 332 + ], + "type": "image", + "image_path": "874bbee417fccf7c5a5cbc21ef58df9624603c16927e8facc7e608b9c78004c5.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 456, + 335, + 543, + 422 + ], + "blocks": [ + { + "bbox": [ + 456, + 335, + 543, + 422 + ], + "lines": [ + { + "bbox": [ + 456, + 335, + 543, + 422 + ], + "spans": [ + { + "bbox": [ + 456, + 335, + 543, + 422 + ], + "type": "image", + "image_path": "8c5023c7c3bb44a5cbe06b847a2eb0751a7ff0bfc87e1dd62dc7e15795a12609.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "bbox": [ + 143, + 555, + 192, + 568 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 555, + 192, + 568 + ], + "spans": [ + { + "bbox": [ + 143, + 555, + 192, + 568 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 46, + 594, + 289, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 594, + 289, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 594, + 289, + 715 + ], + "type": "text", + "content": "Humans are able to accurately reason in 3D by gathering multi-view observations of the surrounding world. 
Inspired by this insight, we introduce a new large-scale benchmark for 3D multi-view visual question answering (3DMV-VQA). This dataset is collected by an embodied agent actively moving and capturing RGB images in an environment using the Habitat simulator. In total, it consists of approximately 5k scenes, 600k images, paired with 50k questions. We evaluate various state-of-the-art models for visual reasoning on our benchmark and find that they all perform poorly. We suggest" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 304, + 557, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 557, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 557, + 547, + 713 + ], + "type": "text", + "content": "that a principled approach for 3D reasoning from multi-view images should be to infer a compact 3D representation of the world from the multi-view images, which is further grounded on open-vocabulary semantic concepts, and then to execute reasoning on these 3D representations. As the first step towards this approach, we propose a novel 3D concept learning and reasoning (3D-CLR) framework that seamlessly combines these components via neural fields, 2D pre-trained vision-language models, and neural reasoning operators. Experimental results suggest that our framework outperforms baseline models by a large margin, but the challenge remains largely unsolved. We further perform an in-depth analysis of the challenges and highlight potential future directions." + } + ] + } + ], + "index": 22 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "text", + "content": "9202" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 127, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 127, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 127, + 83 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 91, + 289, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 91, + 289, + 258 + ], + "spans": [ + { + "bbox": [ + 47, + 91, + 289, + 258 + ], + "type": "text", + "content": "Visual reasoning, the ability to composite rules on internal representations to reason and answer questions about visual scenes, has been a long-standing challenge in the field of artificial intelligence and computer vision. Several datasets [23, 33, 69] have been proposed to tackle this challenge. 
However, they mainly focus on visual reasoning on 2D single-view images. Since 2D single-view images only cover a limited region of the whole space, such reasoning inevitably has several weaknesses, including occlusion, and failing to answer 3D-related questions about the entire scene that we are interested in. As shown in Fig. 1, it's difficult, even for humans, to count the number of chairs in a scene due to the object occlusion, and it's even harder to infer 3D relations like \"closer\" from a single-view 2D image." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 259, + 289, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 259, + 289, + 426 + ], + "spans": [ + { + "bbox": [ + 47, + 259, + 289, + 426 + ], + "type": "text", + "content": "On the other hand, there's strong psychological evidence that human beings conduct visual reasoning in the underlying 3D representations [55]. Recently, there have been several works focusing on 3D visual question answering [2,16,62,64]. They mainly use traditional 3D representations (e.g., point clouds) for visual reasoning. This is inconsistent with the way human beings perform 3D reasoning in real life. Instead of being given an entire 3D representation of the scene at once, humans will actively walk around and explore the whole environment, ingesting image observations from different views and converting them into a holistic 3D representation that assists them in understanding and reasoning about the environment. Such abilities are crucial for many embodied AI applications, such as building assistive robots." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 426, + 289, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 426, + 289, + 641 + ], + "spans": [ + { + "bbox": [ + 47, + 426, + 289, + 641 + ], + "type": "text", + "content": "To this end, we propose the novel task of 3D visual reasoning from multi-view images taken by active exploration of an embodied agent. Specifically, we generate a large-scale benchmark, 3DMV-VQA (3D multi-view visual question answering), that contains approximately 5k scenes and 50k question-answering pairs about these scenes. For each scene, we provide a collection of multi-view image observations. We generate this dataset by placing an embodied agent in the Habitat-Matterport environment [47], which actively explores the environment and takes pictures from different views. We also obtain scene graph annotations from the Habitat-Matterport 3D semantics dataset (HM3DSem) [61], including ground-truth locations, segmentations, semantic information of the objects, as well as relationships among the objects in the environments, for model diagnosis. To evaluate the models' 3D reasoning abilities on the entire environment, we design several 3D-related question types, including concept, counting, relation and comparison." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 642, + 289, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 642, + 289, + 714 + ], + "spans": [ + { + "bbox": [ + 47, + 642, + 289, + 714 + ], + "type": "text", + "content": "Given this new task, the key challenges we would like to investigate include: 1) how to efficiently obtain the compact visual representation to encode crucial properties (e.g., semantics and relations) by integrating all incomplete observations of the environment in the process of active exploration for 3D visual reasoning? 
2) How to ground the semantic con" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 304, + 72, + 547, + 120 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 547, + 120 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 547, + 120 + ], + "type": "text", + "content": "cepts on these 3D representations that could be leveraged for downstream tasks, such as visual reasoning? 3) How to infer the relations among the objects, and perform step-by-step reasoning?" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 121, + 547, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 121, + 547, + 443 + ], + "spans": [ + { + "bbox": [ + 304, + 121, + 547, + 443 + ], + "type": "text", + "content": "As the first step to tackling these challenges, we propose a novel model, 3D-CLR (3D Concept Learning and Reasoning). First, to efficiently obtain a compact 3D representation from multi-view images, we use a neural-field model based on compact voxel grids [57] which is both fast to train and effective at storing scene properties in its voxel grids. As for concept learning, we observe that previous works on 3D scene understanding [1,3] lack the diversity and scale with regard to semantic concepts due to the limited amount of paired 3D-and-language data. Although large-scale vision-language models (VLMs) have achieved impressive performances for zero-shot semantic grounding on 2D images, leveraging these pretrained models for effective open-vocabulary 3D grounding of semantic concepts remains a challenge. To address these challenges, we propose to encode the features of a pre-trained 2D vision-language model (VLM) into the compact 3D representation defined across voxel locations. Specifically, we use the CLIP-LSeg [37] model to obtain features on multi-view images, and propose an alignment loss to map the features in our 3D voxel grid to 2D pixels. By calculating the dot-product attention between the 3D per-point features and CLIP language embeddings, we can ground the semantic concepts in the 3D compact representation. Finally, to answer the questions, we introduce a set of neural reasoning operators, including FILTER, COUNT, RELATION operators and so on, which take the 3D representations of different objects as input and output the predictions." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 444, + 547, + 527 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 444, + 547, + 527 + ], + "spans": [ + { + "bbox": [ + 304, + 444, + 547, + 527 + ], + "type": "text", + "content": "We conduct experiments on our proposed 3DMV-VQA benchmark. Experimental results show that our proposed 3D-CLR outperforms all baseline models a lot. However, failure cases and model diagnosis show that challenges still exist concerning the grounding of small objects and the separation of close object instances. We provide an in-depth analysis of the challenges and discuss potential future directions." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 306, + 528, + 547, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 528, + 547, + 540 + ], + "spans": [ + { + "bbox": [ + 306, + 528, + 547, + 540 + ], + "type": "text", + "content": "To sum up, we have the following contributions in this paper." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 555, + 547, + 713 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 306, + 555, + 545, + 579 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 555, + 545, + 579 + ], + "spans": [ + { + "bbox": [ + 306, + 555, + 545, + 579 + ], + "type": "text", + "content": "- We propose the novel task of 3D concept learning and reasoning from multi-view images." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 584, + 547, + 620 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 584, + 547, + 620 + ], + "spans": [ + { + "bbox": [ + 306, + 584, + 547, + 620 + ], + "type": "text", + "content": "- By having robots actively explore the embodied environments, we collect a large-scale benchmark on 3D multiview visual question answering (3DMV-VQA)." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 624, + 547, + 684 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 624, + 547, + 684 + ], + "spans": [ + { + "bbox": [ + 306, + 624, + 547, + 684 + ], + "type": "text", + "content": "- We devise a model that incorporates a neural radiance field, 2D pretrained vision and language model, and neural reasoning operators to ground the concepts and perform 3D reasoning on the multi-view images. We illustrate that our model outperforms all baseline models." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "type": "text", + "content": "- We perform an in-depth analysis of the challenges of this new task and highlight potential future directions." + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "text", + "content": "9203" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 71, + 134, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 71, + 134, + 84 + ], + "spans": [ + { + "bbox": [ + 47, + 71, + 134, + 84 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 91, + 289, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 91, + 289, + 449 + ], + "spans": [ + { + "bbox": [ + 47, + 91, + 289, + 449 + ], + "type": "text", + "content": "Visual Reasoning There have been numerous tasks focusing on learning visual concepts from natural language, including visually-grounded question answering [18, 19], text-image retrieval [59] and so on. Visual reasoning has drawn much attention recently as it requires human-like understanding of the visual scene. A wide variety of benchmarks have been created over the recent years [7, 8, 23, 27, 33, 69]. However, they mainly focus on visual reasoning from 2D single-view images, while there's strong psychological evidence that human beings perform visual reasoning on the underlying 3D representations. In this paper, we propose the novel task of visual reasoning from multi-view images, and collect a large-scale benchmark for this task. 
In recent years, numerous visual reasoning models have also been proposed, ranging from attention-based methods [5, 30], graph-based methods [28], to models based on large pretrained vision-language model [9, 38]. These methods model the reasoning process implicitly with neural networks. Neural-symbolic methods [6, 40, 65] explicitly perform symbolic reasoning on the objects representations and language representations. They use perception models to extract 2D masks as a first step, and then execute operators and ground concepts on these pre-segmented masks, but are limited to a set of predefined concepts on simple scenes. [26] proposes to use the feature vectors from occupancy networks [42] to do visual reasoning in the 3D space. However, they also use a synthetic dataset, and learn a limited set of semantic concepts from scratch. We propose to learn 3D neural field features from 2D multi-view real-world images, and incorporate a 2D VLM for open-vocabulary reasoning." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 450, + 289, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 450, + 289, + 666 + ], + "spans": [ + { + "bbox": [ + 47, + 450, + 289, + 666 + ], + "type": "text", + "content": "3D Reasoning Understanding and reasoning about 3D scenes has been a long-standing challenge. Recent works focus on leveraging language to explore 3D scenes, such as object captioning [3,4] and object localization from language [1, 17, 29]. Our work is mostly related to 3D Visual Question Answering [2, 16, 62, 64] as we both focus on answering questions and reasoning about 3D scenes. However, these works use point clouds as 3D representations, which diverts from the way human beings perform 3D reasoning. Instead of being given an entire 3D representation all at once, human beings would actively move and explore the environment, integrating multi-view information to get a compact 3D representation. Therefore, we propose 3D reasoning from multi-view images. In addition, since 3D assets paired with natural language descriptions are hard to get in real-life scenarios, previous works struggle to ground open-vocabulary concepts. In our work, we leverage 2D VLMs for zero-shot open-vocabulary concept grounding in the 3D space." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 666, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 666, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 47, + 666, + 288, + 715 + ], + "type": "text", + "content": "Embodied Reasoning Our work is also closely related to Embodied Question Answering (EQA) [11, 67] and Interactive Question Answering (IQA) [22, 35], which also involve an embodied agent exploring the environment and answering" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 304, + 72, + 547, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 547, + 180 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 547, + 180 + ], + "type": "text", + "content": "the question. However, the reasoning mainly focuses on the outcome or the history of the navigation on 2D images and does not require a holistic 3D understanding of the environment. There are also works [12, 20, 51, 54, 56, 68] targeting instruction following in embodied environments, in which an agent is asked to perform a series of tasks based on language instructions. 
Different from their settings, for our benchmark an embodied agent actively explores the environment and takes multi-view images for 3D-related reasoning." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 304, + 181, + 548, + 433 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 181, + 548, + 433 + ], + "spans": [ + { + "bbox": [ + 304, + 181, + 548, + 433 + ], + "type": "text", + "content": "Neural Fields Our approach utilizes neural fields to parameterize an underlying 3D compact representations of scenes for reasoning. Neural field models (e.g., [43]) have gained much popularity since they can reconstruct a volumetric 3D scene representation from a set of images. Recent works [21, 24, 57, 66] have pushed it further by using classic voxel-grids to explicitly store the scene properties (e.g., density, color and feature) for rendering, which allows for real-time rendering and is utilized by this paper. Neural fields have also been used to represent dynamic scenes [14, 44], appearance [43, 45, 49, 53, 63], physics [34], robotics [32, 52], acoustics [39] and more general multi-modal signals [13]. There are also some works that integrate semantics or language in neural fields [31, 60]. However, they mainly focus on using language for manipulation, editing or generation. [26] leverages neural descriptor field [52] for 3D concept grounding. However, they require ground-truth occupancy values to train the neural field, which can not be applied to real-world scenes. In this paper, we propose to leverage voxel-based neural radiance field [57] to get the compact representations for 3D visual reasoning." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 306, + 445, + 421, + 457 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 445, + 421, + 457 + ], + "spans": [ + { + "bbox": [ + 306, + 445, + 421, + 457 + ], + "type": "text", + "content": "3. Dataset Generation" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 306, + 465, + 418, + 478 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 465, + 418, + 478 + ], + "spans": [ + { + "bbox": [ + 306, + 465, + 418, + 478 + ], + "type": "text", + "content": "3.1. Multi-View Images" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 484, + 547, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 484, + 547, + 533 + ], + "spans": [ + { + "bbox": [ + 304, + 484, + 547, + 533 + ], + "type": "text", + "content": "Our dataset includes 5k 3D scenes from the Habitat-Matterport 3D Dataset (HM3D) dataset [47], and approximately 600k images rendered from the 3D scenes. The images are rendered via Habitat [50, 58]." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 533, + 548, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 533, + 548, + 640 + ], + "spans": [ + { + "bbox": [ + 304, + 533, + 548, + 640 + ], + "type": "text", + "content": "Scene Generation We build our benchmark on top of the HM3DSem dataset [61], which is a large-scale dataset of 3D real-world indoor scenes with densely annotated semantics. It consists of 142,646 object instance annotations across 216 3D spaces and 3,100 rooms within those spaces. HM3D dataset uses texture information to annotate pixel-accurate object boundaries, which provides large-scale object annotations and ensures the scale, quality, and diversity of 3D visual reasoning questions of our benchmark." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 641, + 548, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 641, + 548, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 641, + 548, + 715 + ], + "type": "text", + "content": "To construct a benchmark that covers questions of different difficulty levels, it's crucial that we include 3D scenes of different scales in our benchmark. We start with single rooms in HM3D scenes, which has an appropriate amount of semantic concepts and relationships to base some simple questions on. To get the scale of single rooms, we calculate bounding" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "9204" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 288, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 288, + 156 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 288, + 156 + ], + "type": "text", + "content": "boxes of rooms according to floor instance segmentations. We then proceed to generate bounding boxes for scenes with multiple adjacent rooms. For more complex holistic scene understanding, we also include whole-house scenes, which may contain tens of rooms. Overall, the 3DMV-VQA benchmark contains three levels of scenes (2000 single-room scenes, 2000 multi-room scenes and 100 whole-house scenes)." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 156, + 290, + 360 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 156, + 290, + 360 + ], + "spans": [ + { + "bbox": [ + 46, + 156, + 290, + 360 + ], + "type": "text", + "content": "Image Rendering After we get the bounding box of each scene, we load the scene into the Habitat simulator. We also put a robot agent with an RGB sensor at a random initial point in the bounding box. The data is collected via exploration of the robot agent. Specifically, at each step of the data collection process, we sample a navigable point and make the agent move to the point along the shortest path. When the agent has arrived at a point, we rotate the agent " + }, + { + "bbox": [ + 46, + 156, + 290, + 360 + ], + "type": "inline_equation", + "content": "30^{\\circ}" + }, + { + "bbox": [ + 46, + 156, + 290, + 360 + ], + "type": "text", + "content": " along z-axis for 12 times so that the agent can observe the " + }, + { + "bbox": [ + 46, + 156, + 290, + 360 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 46, + 156, + 290, + 360 + ], + "type": "text", + "content": " view of the scene at the position. It can also look up and down, with a random mild angle from " + }, + { + "bbox": [ + 46, + 156, + 290, + 360 + ], + "type": "inline_equation", + "content": "[-10^{\\circ}, 10^{\\circ}]" + }, + { + "bbox": [ + 46, + 156, + 290, + 360 + ], + "type": "text", + "content": " along the x-axis. A picture is taken each time the agent rotates to a new orientation. In total 12 pictures are taken from each point. While traveling between points, the robot agent further takes pictures. We also exploit a policy such that when the camera is too far from or too close to an object and thus the agent cannot see anything, we discard the bad-view images." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 365, + 180, + 376 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 365, + 180, + 376 + ], + "spans": [ + { + "bbox": [ + 47, + 365, + 180, + 376 + ], + "type": "text", + "content": "3.2. Questions and Answers" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 383, + 288, + 539 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 383, + 288, + 539 + ], + "spans": [ + { + "bbox": [ + 46, + 383, + 288, + 539 + ], + "type": "text", + "content": "We pair each scene with machine-generated questions from pre-defined templates. All questions are open-ended and can be answered with a single word (samples in Fig. 1). Concepts and Relationships To generate questions and answers, we utilize the semantic annotations of HM3DSem [61] to get the semantic concepts and their bounding boxes, as well as the bounding boxes of the rooms. We merge semantic concepts with similar meanings (e.g., L-shaped sofa to sofa, desk chair / computer chair e.g. to chair). We also define 11 relationships: inside, above, below, on the top of, close, far, large, small, between, on the left, and on the right. Before generating questions, we first generate a scene graph for each scene containing all concepts and relationships." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 539, + 288, + 562 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 539, + 288, + 562 + ], + "spans": [ + { + "bbox": [ + 47, + 539, + 288, + 562 + ], + "type": "text", + "content": "Question Types We define four types of questions: concept, counting, relation and comparison." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 562, + 288, + 714 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 47, + 562, + 288, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 562, + 288, + 597 + ], + "spans": [ + { + "bbox": [ + 47, + 562, + 288, + 597 + ], + "type": "text", + "content": "- Concept. Conceptual questions query if there's an object of a certain semantic concept in the scene, or whether there's a room containing the objects of the semantic concept." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 600, + 288, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 600, + 288, + 635 + ], + "spans": [ + { + "bbox": [ + 47, + 600, + 288, + 635 + ], + "type": "text", + "content": "- Counting. Counting-related questions ask about how many instances of a semantic concept are in the scene, or how many rooms contain objects of the semantic concept." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 639, + 288, + 687 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 639, + 288, + 687 + ], + "spans": [ + { + "bbox": [ + 47, + 639, + 288, + 687 + ], + "type": "text", + "content": "- Relation. Relational questions ask about the 11 relationships and their compositions. Based on the number of relations in a question, we have one-hop to three-hop questions for the relation type." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 689, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 689, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 47, + 689, + 288, + 714 + ], + "type": "text", + "content": "- Comparison. 
The comparison question type focuses on the comparison of two objects, two semantic concepts or two" + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 310, + 72, + 547, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 72, + 547, + 133 + ], + "spans": [ + { + "bbox": [ + 310, + 72, + 547, + 133 + ], + "type": "text", + "content": "rooms. It can be combined with the relational concepts to compare two objects (e.g., larger, closer to, more left etc). It also compares the number of instances of two semantic concepts, or the number of objects of certain concepts in different rooms." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 140, + 547, + 416 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 140, + 547, + 416 + ], + "spans": [ + { + "bbox": [ + 304, + 140, + 547, + 416 + ], + "type": "text", + "content": "Bias Control. Similar to previous visual reasoning benchmarks [26, 33], we use machine-generated questions since the generation process is fully controllable so that we can avoid dataset bias. Questions are generated from pre-defined templates, and transformed into natural language questions with associated semantic concepts and relationships from the scene. We manually define 41 templates for question generation. We use depth-first search to generate questions. We perform bias control based on three perspectives: template counts, answer counts, and concept counts. For selecting templates, we sort the templates each time we generate a question to ensure a balanced question distribution. We force a flat answer distribution for each template by rejection sampling. Specifically, once we generate a question and an answer, if the number of the questions having the same answer and template is significantly larger than other answers, we discard it and continue searching. Once we find an answer that fits in the ideal answer distribution, we stop the depth-first searching for this question. We also force a flat concept distribution for each template using the same method. In addition to controlling the number of concepts mentioned in the templates, we also control the number of relation tuples consisting of the same concept sets." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 426, + 361, + 439 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 426, + 361, + 439 + ], + "spans": [ + { + "bbox": [ + 306, + 426, + 361, + 439 + ], + "type": "text", + "content": "4. Method" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 447, + 547, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 447, + 547, + 651 + ], + "spans": [ + { + "bbox": [ + 304, + 447, + 547, + 651 + ], + "type": "text", + "content": "Fig. 2 illustrates an overview of our framework. Specifically, our framework consists of three steps. First, we learn a 3D compact representation from multi-view images using neural field. And then we propose to leverage pre-trained 2D vision-and-language model to ground concepts on 3D space. This is achieved by 1) generating 2D pixel features using CLIP-LSeg; 2) aligning the features of 3D voxel grid and 2D pixel features from CLIP-LSeg [37]; 3) dot-product attention between the 3D features and CLIP language features [37]. Finally, to perform visual reasoning, we propose neural reasoning operators, which execute the question step by step on the 3D compact representation and outputs a final answer. 
For example, we use FILTER operators to ground semantic concepts on the 3D representation, GETINSTANCE to get all instances of a semantic class, and COUNT_RELATION to count how many pairs of the two semantic classes have the queried relation." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 306, + 658, + 541, + 672 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 658, + 541, + 672 + ], + "spans": [ + { + "bbox": [ + 306, + 658, + 541, + 672 + ], + "type": "text", + "content": "4.1. Learning 3D Compact Scene Representations" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 677, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 677, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 677, + 547, + 713 + ], + "type": "text", + "content": "Neural radiance fields [43] are capable of learning a 3D representation that can reconstruct a volumetric 3D scene representation from a set of images. Voxel-based meth" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "text", + "content": "9205" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 51, + 72, + 545, + 305 + ], + "blocks": [ + { + "bbox": [ + 51, + 72, + 545, + 305 + ], + "lines": [ + { + "bbox": [ + 51, + 72, + 545, + 305 + ], + "spans": [ + { + "bbox": [ + 51, + 72, + 545, + 305 + ], + "type": "image", + "image_path": "19407b31f659eff8444b6c2a799e47318398d9458986c4f843c53129e65b011a.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 309, + 546, + 365 + ], + "lines": [ + { + "bbox": [ + 46, + 309, + 546, + 365 + ], + "spans": [ + { + "bbox": [ + 46, + 309, + 546, + 365 + ], + "type": "text", + "content": "Figure 2. An overview of our 3D-CLR framework. First, we learn a 3D compact scene representation from multi-view images using neural fields (I). Second, we use CLIP-LSeg model to get per-pixel 2D features (II). We utilize a 3D-2D alignment loss to assign features to the 3D compact representation (III). By calculating the dot-product attention between the 3D per-point features and CLIP language embeddings, we could get the concept grounding in 3D (IV). Finally, the reasoning process is performed via a set of neural reasoning operators, such as FILTER, GET instances and COUNT_RELATION (V). Relation operators are learned via relation networks." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 372, + 289, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 372, + 289, + 563 + ], + "spans": [ + { + "bbox": [ + 46, + 372, + 289, + 563 + ], + "type": "text", + "content": "ods [21, 24, 57, 66] speed up the learning process by explicitly storing the scene properties (e.g., density, color and feature) in its voxel grids. We leverage Direct Voxel Grid Optimization (DVGO) [57] as our backbone for 3D compact representation for its fast speed. DVGO stores the learned density and color properties in its grid cells. 
The rendering of multi-view images is by interpolating through the voxel grids to get the density and color for each sampled point along each sampled ray, and integrating the colors based on the rendering alpha weights calculated from densities according to quadrature rule [41]. The model is trained by minimizing the L2 loss between the rendered multi-view images and the ground-truth multi-view images. By extracting the density voxel grid, we can get the 3D compact representation (e.g., By visualizing points with density greater than 0.5, we can get the 3D representation as shown in Fig. 2 I.)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 574, + 226, + 588 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 574, + 226, + 588 + ], + "spans": [ + { + "bbox": [ + 47, + 574, + 226, + 588 + ], + "type": "text", + "content": "4.2. 3D Semantic Concept Grounding" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 594, + 289, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 594, + 289, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 594, + 289, + 715 + ], + "type": "text", + "content": "Once we extract the 3D compact representation of the scene, we need to ground the semantic concepts for reasoning from language. Recent work from [26] has proposed to ground concepts from paired 3D assets and question-answers. Though promising results have been achieved on synthetic data, it is not feasible for open-vocabulary 3D reasoning in real-world data, since it is hard to collect largescale 3D vision-and-language paired data. To address this challenge, our idea is to leverage pre-trained 2D vision and language model [46, 48] for 3D concept grounding in real-" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 304, + 372, + 547, + 516 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 372, + 547, + 516 + ], + "spans": [ + { + "bbox": [ + 304, + 372, + 547, + 516 + ], + "type": "text", + "content": "world scenes. But how can we map 2D concepts into 3D neural field representations? Note that 3D compact representations can be learned from 2D multi-view images and that each 2D pixel actually corresponds to several 3D points along the ray. Therefore, it's possible to get 3D features from 2D per-pixel features. Inspired by this, we first add a feature voxel grid representation to DVGO, in addition to density and color, to represent 3D features. We then apply CLIP-LSeg [37] to learn per-pixel 2D features, which can be attended to by CLIP concept embeddings. We use an alignment loss to align 3D features with 2D features so that we can perform concept grounding on the 3D representations." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 518, + 547, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 518, + 547, + 639 + ], + "spans": [ + { + "bbox": [ + 304, + 518, + 547, + 639 + ], + "type": "text", + "content": "2D Feature Extraction. To get per-pixel features that can be attended by concept embeddings, we use the features from language-driven semantic segmentation (CLIP-LSeg) [37], which learns 2D per-pixel features from a pre-trained vision-language model (i.e., [46]). Specifically, it uses the text encoder from CLIP, trains an image encoder to produce an embedding vector for each pixel, and calculates the scores of word-pixel correlation by dot-product. 
By outputting the semantic class with the maximum score of each pixel, CLIP-LSeg is able to perform zero-shot 2D semantic segmentation." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 641, + 547, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 641, + 547, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 641, + 547, + 714 + ], + "type": "text", + "content": "3D-2D Alignment. In addition to density and color, we also store a 512-dim feature in each grid cell in the compact representation. To align the 3D per-point features with 2D per-pixel features, we calculate an L1 loss between each pixel and each 3D point sampled on the ray of the pixel. The overall L1 loss along a ray is the weighted sum of all" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "text", + "content": "9206" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 287, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 287, + 144 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 287, + 144 + ], + "type": "text", + "content": "the pixel-point alignment losses, with weights same as the rendering weights: " + }, + { + "bbox": [ + 46, + 72, + 287, + 144 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{feature}} = \\sum_{i=1}^{K} w_i (\\| \\pmb{f}_i - F(\\pmb{r}) \\|)" + }, + { + "bbox": [ + 46, + 72, + 287, + 144 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 46, + 72, + 287, + 144 + ], + "type": "inline_equation", + "content": "\\pmb{r}" + }, + { + "bbox": [ + 46, + 72, + 287, + 144 + ], + "type": "text", + "content": " is a ray corresponding to a 2D pixel, " + }, + { + "bbox": [ + 46, + 72, + 287, + 144 + ], + "type": "inline_equation", + "content": "F(\\pmb{r})" + }, + { + "bbox": [ + 46, + 72, + 287, + 144 + ], + "type": "text", + "content": " is the 2D feature from CLIP-LSeg, " + }, + { + "bbox": [ + 46, + 72, + 287, + 144 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 46, + 72, + 287, + 144 + ], + "type": "text", + "content": " is the total number of sampled points along the ray and " + }, + { + "bbox": [ + 46, + 72, + 287, + 144 + ], + "type": "inline_equation", + "content": "\\pmb{f}_i" + }, + { + "bbox": [ + 46, + 72, + 287, + 144 + ], + "type": "text", + "content": " is the feature of point " + }, + { + "bbox": [ + 46, + 72, + 287, + 144 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 46, + 72, + 287, + 144 + ], + "type": "text", + "content": " by interpolating through the feature voxel grid, " + }, + { + "bbox": [ + 46, + 72, + 287, + 144 + ], + "type": "inline_equation", + "content": "w_i" + }, + { + "bbox": [ + 46, + 72, + 287, + 144 + ], + "type": "text", + "content": " is the rendering weight." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 144, + 289, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 144, + 289, + 228 + ], + "spans": [ + { + "bbox": [ + 46, + 144, + 289, + 228 + ], + "type": "text", + "content": "Concept Grounding through Attention. 
Since our feature voxel grid representation is learnt from CLIP-LSeg, by calculating the dot-product attention " + }, + { + "bbox": [ + 46, + 144, + 289, + 228 + ], + "type": "inline_equation", + "content": "< f, v >" + }, + { + "bbox": [ + 46, + 144, + 289, + 228 + ], + "type": "text", + "content": " between perpoint 3D feature " + }, + { + "bbox": [ + 46, + 144, + 289, + 228 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 46, + 144, + 289, + 228 + ], + "type": "text", + "content": " and the CLIP concept embeddings " + }, + { + "bbox": [ + 46, + 144, + 289, + 228 + ], + "type": "inline_equation", + "content": "v" + }, + { + "bbox": [ + 46, + 144, + 289, + 228 + ], + "type": "text", + "content": ", we can get zero-shot view-independent concept grounding and semantic segmentations in the 3D representation, as is presented in Fig. 2 IV." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 236, + 205, + 249 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 236, + 205, + 249 + ], + "spans": [ + { + "bbox": [ + 47, + 236, + 205, + 249 + ], + "type": "text", + "content": "4.3. Neural Reasoning Operators" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 255, + 288, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 255, + 288, + 338 + ], + "spans": [ + { + "bbox": [ + 46, + 255, + 288, + 338 + ], + "type": "text", + "content": "Finally, we use the grounded semantic concepts for 3D reasoning from language. We first transform questions into a sequence of operators that can be executed on the 3D representation for reasoning. We adopt a LSTM-based semantic parser [65] for that. As [26, 40], we further devise a set of operators which can be executed on the 3D representation. Please refer to Appendix for a full list of operators." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 339, + 287, + 363 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 339, + 287, + 363 + ], + "spans": [ + { + "bbox": [ + 47, + 339, + 287, + 363 + ], + "type": "text", + "content": "Filter Operators. We filter all the grid cells with a certain semantic concept." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 363, + 287, + 423 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 363, + 287, + 423 + ], + "spans": [ + { + "bbox": [ + 46, + 363, + 287, + 423 + ], + "type": "text", + "content": "Get Instance Operators. We implement this by utilizing DBSCAN [15], an unsupervised algorithm which assigns clusters to a set of points. Specifically, given a set of points in the 3D space, it can group together the points that are closely packed together for instance segmentation." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 423, + 288, + 579 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 423, + 288, + 579 + ], + "spans": [ + { + "bbox": [ + 46, + 423, + 288, + 579 + ], + "type": "text", + "content": "Relation Operators. We cannot directly execute the relation on the 3D representation as we have not grounded relations. Thus, we represent each relation using a distinct neural module (which is practical as the vocabulary of relations is limited [36]). We first concatenate the voxel grid representations of all the referred objects and feed them into the relation network. The relation network consists of three 3D convolutional layers and then three 3D deconvolutional layers. 
A score is output by the relation network indicating whether the objects have the relationship or not. Since vanilla 3D CNNs are very slow, we use Sparse Convolution [10] instead. Based on the relations asked in the questions, different relation modules are chosen." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 590, + 128, + 604 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 590, + 128, + 604 + ], + "spans": [ + { + "bbox": [ + 47, + 590, + 128, + 604 + ], + "type": "text", + "content": "5. Experiments" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 610, + 164, + 624 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 610, + 164, + 624 + ], + "spans": [ + { + "bbox": [ + 47, + 610, + 164, + 624 + ], + "type": "text", + "content": "5.1. Experimental Setup" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 629, + 287, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 629, + 287, + 665 + ], + "spans": [ + { + "bbox": [ + 46, + 629, + 287, + 665 + ], + "type": "text", + "content": "Evaluation Metric. We report the visual question answering accuracy on the proposed 3DMV-VQA dataset w.r.t the four types of questions. The train/val/test split is 7:1:2." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 666, + 288, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 666, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 666, + 288, + 713 + ], + "type": "text", + "content": "Implementation Details For 3D compact representations, we adopt the same architectures as DVGO, except skipping the coarse reconstruction phase and directly training the fine reconstruction phase. After that, we freeze the density voxel" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 72, + 547, + 263 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 547, + 263 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 547, + 263 + ], + "type": "text", + "content": "grid and color voxel grid, for the optimization of the feature voxel grid only. The feature grid has a world size of 100 and feature dim of 512. We train the compact representations for 100,000 iterations and the 3D features for another 20,000 iterations. For LSeg, we use the official demo model, which has the ViT-L/16 image encoder and CLIP's ViT-B/32 text encoder. We follow the official script for inference and use multi-scale inference. For DBSCAN, we use an epsilon value of 1.5, minimum samples of 2, and we use L1 as the clustering method. For the relation networks, each relation is encoded into a three-layer sparse 3D convolution network with hidden size 64. The output is then fed into a one-layer linear network to produce a score, which is normalized by sigmoid function. We use cross-entropy loss to train the relation networks, and we use the one-hop relational questions with \"yes/no\" answers to train the relation networks." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 271, + 372, + 283 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 271, + 372, + 283 + ], + "spans": [ + { + "bbox": [ + 306, + 271, + 372, + 283 + ], + "type": "text", + "content": "5.2. 
Baselines" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 305, + 289, + 547, + 326 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 289, + 547, + 326 + ], + "spans": [ + { + "bbox": [ + 305, + 289, + 547, + 326 + ], + "type": "text", + "content": "Our baselines range from vanilla neural networks, attention-based methods, fine-tuned from large-scale VLM, and graph-based methods, to neural-symbolic methods." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 306, + 333, + 547, + 713 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 306, + 333, + 547, + 406 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 333, + 547, + 406 + ], + "spans": [ + { + "bbox": [ + 306, + 333, + 547, + 406 + ], + "type": "text", + "content": "- LSTM. The question is transferred to word embeddings which are input into a word-level LSTM [25]. The last LSTM hidden state is fed into a multi-layer perceptron (MLP) that outputs a distribution over answers. This method is able to model question-conditional bias since it uses no image information." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 409, + 547, + 482 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 409, + 547, + 482 + ], + "spans": [ + { + "bbox": [ + 306, + 409, + 547, + 482 + ], + "type": "text", + "content": "- CNN+LSTM. The question is encoded by the final hidden states from LSTM. We use a resnet-50 to extract frame-level features of images and average them over the time dimension. The features are fed to an MLP to predict the final answer. This is a simple baseline that examines how vanilla neural networks perform on 3DMV-VQA." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 306, + 486, + 547, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 486, + 547, + 533 + ], + "spans": [ + { + "bbox": [ + 306, + 486, + 547, + 533 + ], + "type": "text", + "content": "- 3D-Feature+LSTM. We use the 3D features we get from 3D-2D alignment and downsample the voxel grids using 3D-CNN as input, concatenated with language features from LSTM and fed to an MLP." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 306, + 537, + 547, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 537, + 547, + 586 + ], + "spans": [ + { + "bbox": [ + 306, + 537, + 547, + 586 + ], + "type": "text", + "content": "- MAC [30]. MAC utilizes a Memory, Attention and Composition cell to perform iterative reasoning process. Like CNN+LSTM, we use the average pooling over multi-view images as the feature map." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 306, + 590, + 547, + 637 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 590, + 547, + 637 + ], + "spans": [ + { + "bbox": [ + 306, + 590, + 547, + 637 + ], + "type": "text", + "content": "- MAC(V). We treat the multi-view images along a trajectory as a video. We modify the MAC model by applying a temporal attention unit across the video frames to generate a latent encoding for the video." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 306, + 641, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 641, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 306, + 641, + 547, + 713 + ], + "type": "text", + "content": "- NS-VQA [65]. This is a 2D version of our 3D-CLR model. 
We use CLIP-LSeg to ground 2D semantic concepts from multi-view images, and the relation network also takes the 2D features as input. We execute the operators on each image and max pool from the answers to get our final predictions." + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "text", + "content": "9207" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 143, + 70, + 451, + 213 + ], + "blocks": [ + { + "bbox": [ + 143, + 70, + 451, + 213 + ], + "lines": [ + { + "bbox": [ + 143, + 70, + 451, + 213 + ], + "spans": [ + { + "bbox": [ + 143, + 70, + 451, + 213 + ], + "type": "table", + "html": "
<table><thead><tr><th>Methods</th><th>Concept</th><th>Counting</th><th>Relation</th><th>Comparison</th><th>Overall</th></tr></thead><tbody><tr><td>Q-type (rand.)</td><td>49.4</td><td>10.7</td><td>21.6</td><td>49.2</td><td>26.4</td></tr><tr><td>LSTM</td><td>53.4</td><td>15.3</td><td>24.0</td><td>55.2</td><td>29.8</td></tr><tr><td>CNN+LSTM</td><td>57.8</td><td>22.1</td><td>35.2</td><td>59.7</td><td>37.8</td></tr><tr><td>MAC</td><td>62.4</td><td>19.7</td><td>47.8</td><td>62.3</td><td>46.7</td></tr><tr><td>MAC(V)</td><td>60.0</td><td>24.6</td><td>51.6</td><td>65.9</td><td>50.0</td></tr><tr><td>NS-VQA</td><td>59.8</td><td>21.5</td><td>33.4</td><td>61.6</td><td>38.0</td></tr><tr><td>ALPRO</td><td>65.8</td><td>12.7</td><td>42.2</td><td>68.2</td><td>43.3</td></tr><tr><td>LGCN</td><td>56.2</td><td>19.5</td><td>35.5</td><td>66.7</td><td>39.1</td></tr><tr><td>3D-Feature+LSTM</td><td>61.2</td><td>22.4</td><td>49.9</td><td>61.3</td><td>48.2</td></tr><tr><td>3D-CLR (Ours)</td><td>66.1</td><td>41.3</td><td>57.6</td><td>72.3</td><td>57.7</td></tr></tbody></table>