diff --git "a/2025/4K4DGen_ Panoramic 4D Generation at 4K Resolution/layout.json" "b/2025/4K4DGen_ Panoramic 4D Generation at 4K Resolution/layout.json" new file mode 100644--- /dev/null +++ "b/2025/4K4DGen_ Panoramic 4D Generation at 4K Resolution/layout.json" @@ -0,0 +1,14824 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 104, + 79, + 192, + 95 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 79, + 192, + 95 + ], + "spans": [ + { + "bbox": [ + 104, + 79, + 192, + 95 + ], + "type": "text", + "content": "4K4DGEN:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 99, + 480, + 116 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 99, + 480, + 116 + ], + "spans": [ + { + "bbox": [ + 105, + 99, + 480, + 116 + ], + "type": "text", + "content": "PANORAMIC 4D GENERATION AT 4K RESOLUTION" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 110, + 133, + 530, + 158 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 133, + 530, + 158 + ], + "spans": [ + { + "bbox": [ + 110, + 133, + 530, + 158 + ], + "type": "text", + "content": "Renjie " + }, + { + "bbox": [ + 110, + 133, + 530, + 158 + ], + "type": "inline_equation", + "content": "\\mathsf{Li}^{*1,4}" + }, + { + "bbox": [ + 110, + 133, + 530, + 158 + ], + "type": "text", + "content": ", Panwang " + }, + { + "bbox": [ + 110, + 133, + 530, + 158 + ], + "type": "inline_equation", + "content": "\\mathsf{Pan}^{*\\dagger\\ddagger1}" + }, + { + "bbox": [ + 110, + 133, + 530, + 158 + ], + "type": "text", + "content": ", Bangbang Yang" + }, + { + "bbox": [ + 110, + 133, + 530, + 158 + ], + "type": "inline_equation", + "content": "^{*1}" + }, + { + "bbox": [ + 110, + 133, + 530, + 158 + ], + "type": "text", + "content": ", Dejia " + }, + { + "bbox": [ + 110, + 133, + 530, + 158 + ], + "type": "inline_equation", + "content": "\\mathsf{Xu}^{*2}" + }, + { + "bbox": [ + 110, + 133, + 530, + 158 + ], + "type": "text", + "content": ", 
Shijie Zhou" + }, + { + "bbox": [ + 110, + 133, + 530, + 158 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 110, + 133, + 530, + 158 + ], + "type": "text", + "content": ", Xuanyang Zhang" + }, + { + "bbox": [ + 110, + 133, + 530, + 158 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 110, + 133, + 530, + 158 + ], + "type": "text", + "content": ", Zeming " + }, + { + "bbox": [ + 110, + 133, + 530, + 158 + ], + "type": "inline_equation", + "content": "\\mathsf{Li}^{1}" + }, + { + "bbox": [ + 110, + 133, + 530, + 158 + ], + "type": "text", + "content": ", Achuta Kadambi" + }, + { + "bbox": [ + 110, + 133, + 530, + 158 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 110, + 133, + 530, + 158 + ], + "type": "text", + "content": ", Zhangyang Wang" + }, + { + "bbox": [ + 110, + 133, + 530, + 158 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 110, + 133, + 530, + 158 + ], + "type": "text", + "content": ", Zhengzhong " + }, + { + "bbox": [ + 110, + 133, + 530, + 158 + ], + "type": "inline_equation", + "content": "\\mathsf{Tu}^{4}" + }, + { + "bbox": [ + 110, + 133, + 530, + 158 + ], + "type": "text", + "content": ", Zhiwen Fan" + }, + { + "bbox": [ + 110, + 133, + 530, + 158 + ], + "type": "inline_equation", + "content": "^{2}" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 112, + 158, + 454, + 169 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 158, + 454, + 169 + ], + "spans": [ + { + "bbox": [ + 112, + 158, + 454, + 169 + ], + "type": "text", + "content": "1Bytedance, 2 University of Texas at Austin, 3 University of California, Los Angeles," + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 112, + 169, + 214, + 181 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 169, + 214, + 181 + ], + "spans": [ + { + "bbox": [ + 112, + 169, + 214, + 181 + ], + "type": "text", + "content": "Texas A&M 
University" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 112, + 181, + 269, + 192 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 181, + 269, + 192 + ], + "spans": [ + { + "bbox": [ + 112, + 181, + 269, + 192 + ], + "type": "text", + "content": "https://4k4dgen.github.io/" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 112, + 192, + 240, + 202 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 192, + 240, + 202 + ], + "spans": [ + { + "bbox": [ + 112, + 192, + 240, + 202 + ], + "type": "text", + "content": "paulpanwang@gmail.com" + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 111, + 214, + 500, + 373 + ], + "blocks": [ + { + "bbox": [ + 111, + 214, + 500, + 373 + ], + "lines": [ + { + "bbox": [ + 111, + 214, + 500, + 373 + ], + "spans": [ + { + "bbox": [ + 111, + 214, + 500, + 373 + ], + "type": "image", + "image_path": "2cd733ddc9346b7bb5d4c2e7b1d2b7bfc50bfba93deaa710a8006ff505dcec90.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 388, + 506, + 434 + ], + "lines": [ + { + "bbox": [ + 104, + 388, + 506, + 434 + ], + "spans": [ + { + "bbox": [ + 104, + 388, + 506, + 434 + ], + "type": "text", + "content": "Figure 1: 4K4DGen takes a static panoramic image with a resolution of " + }, + { + "bbox": [ + 104, + 388, + 506, + 434 + ], + "type": "inline_equation", + "content": "4096 \\times 2048" + }, + { + "bbox": [ + 104, + 388, + 506, + 434 + ], + "type": "text", + "content": " and allows animation through user interaction or an input mask, transforming the static panorama into dynamic Gaussian Splatting. 4K4DGen supports the rendering of novel views at various timestamps, enriching immersive virtual exploration." 
+ } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "bbox": [ + 276, + 449, + 335, + 460 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 276, + 449, + 335, + 460 + ], + "spans": [ + { + "bbox": [ + 276, + 449, + 335, + 460 + ], + "type": "text", + "content": "ABSTRACT" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 140, + 475, + 471, + 706 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 475, + 471, + 706 + ], + "spans": [ + { + "bbox": [ + 140, + 475, + 471, + 706 + ], + "type": "text", + "content": "The blooming of virtual reality and augmented reality (VR/AR) technologies has driven an increasing demand for the creation of high-quality, immersive, and dynamic environments. However, existing generative techniques either focus solely on dynamic objects or perform outpainting from a single perspective image, failing to meet the requirements of VR/AR applications that need free-viewpoint, " + }, + { + "bbox": [ + 140, + 475, + 471, + 706 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 140, + 475, + 471, + 706 + ], + "type": "text", + "content": " virtual views where users can move in all directions. In this work, we tackle the challenging task of elevating a single panorama to an immersive 4D experience. For the first time, we demonstrate the capability to generate omnidirectional dynamic scenes with " + }, + { + "bbox": [ + 140, + 475, + 471, + 706 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 140, + 475, + 471, + 706 + ], + "type": "text", + "content": " views at 4K (" + }, + { + "bbox": [ + 140, + 475, + 471, + 706 + ], + "type": "inline_equation", + "content": "4096 \\times 2048" + }, + { + "bbox": [ + 140, + 475, + 471, + 706 + ], + "type": "text", + "content": ") resolution, thereby providing an immersive user experience. 
Our method introduces a pipeline that facilitates natural scene animations and optimizes a set of 3D Gaussians using efficient splatting techniques for real-time exploration. To overcome the lack of scene-scale annotated 4D data and models, especially in panoramic formats, we propose a novel Panoramic Denoiser that adapts generic 2D diffusion priors to animate consistently in " + }, + { + "bbox": [ + 140, + 475, + 471, + 706 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 140, + 475, + 471, + 706 + ], + "type": "text", + "content": " images, transforming them into panoramic videos with dynamic scenes at targeted regions. Subsequently, we propose Dynamic Panoramic Lifting to elevate the panoramic video into a 4D immersive environment while preserving spatial and temporal consistency. By transferring prior knowledge from 2D models in the perspective domain to the panoramic domain and the 4D lifting with spatial appearance and geometry regularization, we achieve high-quality Panorama-to-4D generation at a resolution of 4K for the first time." + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 121, + 721, + 350, + 732 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 721, + 350, + 732 + ], + "spans": [ + { + "bbox": [ + 121, + 721, + 350, + 732 + ], + "type": "text", + "content": "*: Equal contribution †: Project lead; ‡: Corresponding author." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 81, + 206, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 81, + 206, + 94 + ], + "spans": [ + { + "bbox": [ + 106, + 81, + 206, + 94 + ], + "type": "text", + "content": "1 INTRODUCTION" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 106, + 506, + 425 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 106, + 506, + 425 + ], + "spans": [ + { + "bbox": [ + 105, + 106, + 506, + 425 + ], + "type": "text", + "content": "With the increasing growth of generative techniques (Rombach et al., 2022; Blattmann et al., 2023a), the capability to create high-quality assets has the potential to revolutionize content creation across VR/AR and other spatial computing platforms. Unlike 2D displays such as smartphones or tablets, ideal VR/AR content must deliver an immersive and seamless experience, enabling 6-DoF virtual tours and supporting high-resolution 4D environments with omnidirectional viewing capabilities. Despite significant advancements in the generation of images, videos, and 3D models, the development of panoramic 4D content has lagged, primarily due to the scarcity of well-annotated, high-quality 4D training data. 
Even in the most relevant field of 4D generation, existing works mainly focus on generating or compositing object-level contents (Bahmani et al., 2024; Lin et al., 2024), which are often in low-resolution (e.g., below " + }, + { + "bbox": [ + 105, + 106, + 506, + 425 + ], + "type": "inline_equation", + "content": "1080\\mathrm{p}" + }, + { + "bbox": [ + 105, + 106, + 506, + 425 + ], + "type": "text", + "content": ") and cannot fulfill the demand of qualified immersive experiences. Based on these observations, we propose that an ideal generative tool for creating immersive environments should possess the following properties: (i) the generated content should exhibit high perceptual quality, reaching high-resolution (4K) output with dynamic elements (4D); (ii) the 4D representation must be capable of rendering coherent, continuous, and seamless " + }, + { + "bbox": [ + 105, + 106, + 506, + 425 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 105, + 106, + 506, + 425 + ], + "type": "text", + "content": " panoramic views in real time, supporting efficient 6-DoF virtual tours. However, creating diverse, high-quality 4D panoramic assets presents two significant challenges: (i) the scarcity of large-scale, annotated 4D data, particularly in panoramic formats, limits the training of specialized models. (ii) achieving both fine-grained local details and global coherence in 4D and 4K panoramic views is difficult for existing 2D diffusion models. These models, typically trained on perspective images with narrow fields of view (FoV), cannot be easily adapted to the expansive scopes of large panoramic images (see Exp. 4.3). On another front, video diffusion models (An et al., 2023) trained with web-scale multi-modal data have demonstrated versatile utility as region-based dynamic priors, and Gaussian Splatting (Kerbl et al., 2023) has shown efficient capabilities in modeling 4D environment. 
Thus, we address the large-scale, omnidirectional dynamic scene generation (4D panoramic generation) problem by utilizing the generative power of diffusion models to animate static panoramic images, transforming them into realistic, dynamic scenes that can support immersive, " + }, + { + "bbox": [ + 105, + 106, + 506, + 425 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 105, + 106, + 506, + 425 + ], + "type": "text", + "content": " viewing experiences. To achieve this, we propose to elevate the dynamic panoramic video to 4D environment assets using a set of dynamic Gaussians, which can be seamlessly integrated into VR/AR platforms for real-time rendering and interaction." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 430, + 506, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 430, + 506, + 617 + ], + "spans": [ + { + "bbox": [ + 105, + 430, + 506, + 617 + ], + "type": "text", + "content": "In this paper, we introduce 4K4DGen, a novel framework designed to enable the creation of panoramic 4D environments at resolutions up to 4K. 4K4DGen addresses the key challenges of maintaining consistent object dynamics across the entire " + }, + { + "bbox": [ + 105, + 430, + 506, + 617 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 105, + 430, + 506, + 617 + ], + "type": "text", + "content": " field-of-view (FoV) in panoramic videos, while preserving both spatial and temporal coherence as the video transitions into a fully interactive 4D environment. Specifically, we propose the Panoramic Denoiser, which animates " + }, + { + "bbox": [ + 105, + 430, + 506, + 617 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 105, + 430, + 506, + 617 + ], + "type": "text", + "content": " FoV panoramic images by denoising spherical latent codes corresponding to user-interacted regions. 
The Panoramic Denoiser leverages a well-trained diffusion model originally designed for narrow- " + }, + { + "bbox": [ + 105, + 430, + 506, + 617 + ], + "type": "inline_equation", + "content": "\\mathrm{FoV}" + }, + { + "bbox": [ + 105, + 430, + 506, + 617 + ], + "type": "text", + "content": " perspective images, enabling the generation of " + }, + { + "bbox": [ + 105, + 430, + 506, + 617 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 105, + 430, + 506, + 617 + ], + "type": "text", + "content": " dynamic panoramas while ensuring global coherence and continuity throughout the entire panorama. To transform the omnidirectional panoramic video into a 4D environment, we introduce Dynamic Panoramic Lifting, which corrects scale discrepancies using a depth estimator enriched with perspective prior knowledge to generate panoramic depth maps. Additionally, it employs time-dependent 3D Gaussians optimized with spatial-temporal geometry alignment to ensure cross-frame consistency in dynamic scene representation and rendering. By adapting generic 2D statistical patterns from the perspective domain to the panoramic format and effectively regularizing Gaussian optimization with geometric principles, we achieve high-quality 4K panorama-to-4D content generation with photorealistic novel-view synthesis capabilities. Our contributions can be summarized as follows." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 627, + 504, + 725 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 132, + 627, + 504, + 649 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 627, + 504, + 649 + ], + "spans": [ + { + "bbox": [ + 132, + 627, + 504, + 649 + ], + "type": "text", + "content": "- We introduce 4K4DGen, the first framework capable of generating high-resolution (up to " + }, + { + "bbox": [ + 132, + 627, + 504, + 649 + ], + "type": "inline_equation", + "content": "4096 \\times 2048" + }, + { + "bbox": [ + 132, + 627, + 504, + 649 + ], + "type": "text", + "content": ") 4D omnidirectional assets without the need for annotated 4D data." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 654, + 504, + 687 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 654, + 504, + 687 + ], + "spans": [ + { + "bbox": [ + 132, + 654, + 504, + 687 + ], + "type": "text", + "content": "- We propose the Panoramic Denoiser, which transfers generative priors from pre-trained 2D perspective diffusion models to the panoramic space, enabling consistent animation of panoramas with dynamic scene elements." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 692, + 504, + 725 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 692, + 504, + 725 + ], + "spans": [ + { + "bbox": [ + 132, + 692, + 504, + 725 + ], + "type": "text", + "content": "- We introduce Dynamic Panoramic Lifting, a method that transforms dynamic panoramic videos into dynamic Gaussians, incorporating spatial-temporal regularization to ensure cross-frame consistency and coherence." 
+ } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 157, + 70, + 455, + 219 + ], + "blocks": [ + { + "bbox": [ + 157, + 70, + 455, + 219 + ], + "lines": [ + { + "bbox": [ + 157, + 70, + 455, + 219 + ], + "spans": [ + { + "bbox": [ + 157, + 70, + 455, + 219 + ], + "type": "image", + "image_path": "435b108ca8775b4f6dfcca6ff77524175354df6281c7f41f1ca694031ffd72d1.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 228, + 504, + 262 + ], + "lines": [ + { + "bbox": [ + 104, + 228, + 504, + 262 + ], + "spans": [ + { + "bbox": [ + 104, + 228, + 504, + 262 + ], + "type": "text", + "content": "Figure 2: Panoramic Denoiser adapts diffusion priors from the perspective domain to the panoramic domain by simultaneously denoising perspective views and integrating them into spherical latents at each denoising step. This approach ensures consistent animation across multiple views." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 280, + 212, + 293 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 280, + 212, + 293 + ], + "spans": [ + { + "bbox": [ + 105, + 280, + 212, + 293 + ], + "type": "text", + "content": "2 RELATED WORK" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 311, + 506, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 311, + 506, + 521 + ], + "spans": [ + { + "bbox": [ + 104, + 311, + 506, + 521 + ], + "type": "text", + "content": "Diffusion-based Image and Video Generation. Recent advancements have significantly expanded the capabilities of generating 2D images using diffusion models, as evidenced in several studies (Dhariwal & Nichol, 2021; Nichol et al., 2021; Podell et al., 2024; Ramesh et al., 2022; Saharia et al., 2022). Notably, Stable Diffusion (Rombach et al., 2022) optimizes diffusion models (DMs) within the latent spaces of autoencoders, striking an effective balance between computational efficiency and high image quality. Beyond text conditioning, there is increasing emphasis on integrating additional control signals for more precise image generation (Mou et al., 2024; Zhang et al., 2023). For example, ControlNet (Zhang et al., 2023) enhances the Stable Diffusion encoder to seamlessly incorporate these signals. Furthermore, the generation of images with consistent perspective views is gaining attention, such as the training-based techniques like (Tang et al., 2023; Hollein et al., 2024), or the sampling-based techniques like (Song et al., 2023; Bar-Tal et al., 2023; Lee et al., 2023; Quattrini et al., 2025). Diffusion models are also extensively applied in video generation, as demonstrated by various recent works (Ge et al., 2023; Ho et al., 2022; Wang et al., 2023a; Wu et al., 2023b; 2024b; Zhou et al., 2022). 
For instance, Imagen Video (Ho et al., 2022) utilizes a series of video diffusion models to generate videos from textual descriptions. Similarly, Make-A-Video (Singer et al., 2023) advances a diffusion-based text-to-image model to create videos without requiring paired text-video data. MagicVideo (Zhou et al., 2022) employs frame-wise adaptors and a causal temporal attention module for text-to-video synthesis. Video Latent Diffusion Model (VLDM) (Blattmann et al., 2023b) incorporates temporal layers into a 2D diffusion model to generate temporally coherent videos." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 533, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 533, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 533, + 506, + 733 + ], + "type": "text", + "content": "3D/4D Large-scale Generation. In recent 3D computer vision, a large-scale scene is usually represented as implicit or explicit fields for its appearance (Mildenhall et al., 2020; Kerbl et al., 2023), geometry (Peng et al., 2020; Wang et al., 2023b; Huang et al., 2023), and semantics (Kerr et al., 2023; Zhou et al., 2024a; Qin et al., 2024). We mainly discuss the 3D Gaussian Splatting (3DGS) (Kerbl et al., 2023) based generation here. Several works including DreamGaussian (Tang et al., 2024), GaussianDreamer (Yi et al., 2024), GSGEN (Chen et al., 2023), CG3D (Vilesov et al., 2023), and DiffSplat (Lin et al., 2025) employ 3DGS to generate diverse 3D objects and lay the foundations for compositionality, while LucidDreamer (Chung et al., 2023), Text2Immersion (Ouyang et al., 2023), GALA3D (Zhou et al., 2024c), RealmDreamer (Shriram et al., 2024), and DreamScene360 (Zhou et al., 2024b) aim to generate static large-scale 3D scenes from text. Considering the current advancements in 3D generation, investigations into 4D generation using 3DGS representation have also been conducted. 
DreamGaussian4D (Ren et al., 2024) accomplishes 4D generation based on a reference image. AYG (Ling et al., 2023) equips 3DGS with dynamic capabilities through a deformation network for text-to-4D generation. Besides, Efficient4D (Pan et al., 2024) and 4DGen (Yin et al., 2023) explore video-to-4D generation, and utilize SyncDreamer (Liu et al., 2023) to produce multi-view images from input frames as pseudo ground truth for training a dynamic 3DGS. 4K4D (Xu et al., 2024) is a high-resolution reconstruction technique that extends 3DGS to model complex human motion with detailed backgrounds while achieving real-time rendering speed." + } + ] + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 507, + 237 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 507, + 237 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 507, + 237 + ], + "type": "text", + "content": "Panoramic Representation. A panorama is an image that captures a wide, unbroken view of an area, typically encompassing a field of vision much wider than what a standard photo would cover, providing a more immersive representation of the subject. Recently, novel view synthesis using panoramic representation has been widely explored. 
For instance, PERF (Wang et al., 2024a) trains a panoramic neural radiance field from a single panorama to synthesize " + }, + { + "bbox": [ + 104, + 82, + 507, + 237 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 104, + 82, + 507, + 237 + ], + "type": "text", + "content": " novel views. 360Roam (Huang et al., 2022) proposed learning an omnidirectional neural radiance field and progressively estimating a 3D probabilistic occupancy map to speed up volume rendering. OmniNeRF (Gu et al., 2022) introduced an end-to-end framework for training NeRF using only " + }, + { + "bbox": [ + 104, + 82, + 507, + 237 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 104, + 82, + 507, + 237 + ], + "type": "text", + "content": " RGB images and their approximate poses. PanoHDR-NeRF (Gera et al., 2022) learns the full HDR radiance field from a low dynamic range (LDR) omnidirectional video by freely moving a standard camera around. In the realm of 3DGS, 360-GS (Bai et al., 2024) takes 4 panorama images and 2D room layouts as scene priors to reconstruct the panoramic Gaussian radiance field. DreamScene360 (Zhou et al., 2024b) achieves text-to-3D Panoramic Gaussian Splatting by utilizing monocular depth priors to regularize the Gaussian optimization." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 262, + 211, + 274 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 262, + 211, + 274 + ], + "spans": [ + { + "bbox": [ + 105, + 262, + 211, + 274 + ], + "type": "text", + "content": "3 METHODOLOGY" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 292, + 507, + 370 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 292, + 507, + 370 + ], + "spans": [ + { + "bbox": [ + 104, + 292, + 507, + 370 + ], + "type": "text", + "content": "Taking a single panoramic image as input, the goal of 4K4DGen is to generate a panoramic 4D environment capable of rendering novel views from arbitrary angles and at various timestamps. Our approach initially constructs a panoramic video and then elevates it into a series of 3D Gaussians, enabling efficient splatting for flexible rendering. Naively animating projected perspective images, however, often results in unnatural motion and inconsistent animations. To overcome this, our method proposes the denoising of projected spherical latents, ensuring consistent animation of the panoramic video from the original image, as detailed in Sec. 3.3." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 374, + 507, + 421 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 374, + 507, + 421 + ], + "spans": [ + { + "bbox": [ + 104, + 374, + 507, + 421 + ], + "type": "text", + "content": "Moreover, directly converting multiple perspective images from different timestamps into 4D frequently leads to degraded geometry and visible artifacts (see Sec. 4.3). We address this by applying spatial-temporal geometry fusion to lift the panoramic video, as described in Sec. 3.4. The complete pipeline of 4K4DGen is illustrated in Fig. 3." 
+ } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 160, + 437, + 455, + 650 + ], + "blocks": [ + { + "bbox": [ + 160, + 437, + 455, + 650 + ], + "lines": [ + { + "bbox": [ + 160, + 437, + 455, + 650 + ], + "spans": [ + { + "bbox": [ + 160, + 437, + 455, + 650 + ], + "type": "image", + "image_path": "044887e476c18df502754fb8e96c591c8064607eb53dc46503ff6794a635538e.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 656, + 507, + 724 + ], + "lines": [ + { + "bbox": [ + 104, + 656, + 507, + 724 + ], + "spans": [ + { + "bbox": [ + 104, + 656, + 507, + 724 + ], + "type": "text", + "content": "Figure 3: Overall Pipeline. Beginning with a static panorama as input, the Animating Phase generates a panoramic video by first mapping the panorama into a spherical latent space, followed by denoising within the perspective space, fusing back to the spherical latent space at each step, and finally transforming it into the panoramic space. In the 4D Lifting Phase, a series of dynamic Gaussians is employed to lift the panoramic video into a 4D representation, ensuring both spatial and temporal consistency." 
+ } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 83, + 201, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 83, + 201, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 83, + 201, + 94 + ], + "type": "text", + "content": "3.1 PRELIMINARIES" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 102, + 506, + 269 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 102, + 506, + 269 + ], + "spans": [ + { + "bbox": [ + 104, + 102, + 506, + 269 + ], + "type": "text", + "content": "Latent Diffusion Models (LDMs). LDMs (Rombach et al., 2022) consist of a forward procedure " + }, + { + "bbox": [ + 104, + 102, + 506, + 269 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 104, + 102, + 506, + 269 + ], + "type": "text", + "content": " and a backward procedure " + }, + { + "bbox": [ + 104, + 102, + 506, + 269 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 104, + 102, + 506, + 269 + ], + "type": "text", + "content": ". 
The forward procedure gradually introduces noise into the initial latent code " + }, + { + "bbox": [ + 104, + 102, + 506, + 269 + ], + "type": "inline_equation", + "content": "x_0 \\in \\mathbb{R}^{h \\times w \\times c}" + }, + { + "bbox": [ + 104, + 102, + 506, + 269 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 102, + 506, + 269 + ], + "type": "inline_equation", + "content": "x_0 = \\mathcal{E}(I)" + }, + { + "bbox": [ + 104, + 102, + 506, + 269 + ], + "type": "text", + "content": " is the latent code of image " + }, + { + "bbox": [ + 104, + 102, + 506, + 269 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 104, + 102, + 506, + 269 + ], + "type": "text", + "content": " within the latent space of a VAE, denoted by " + }, + { + "bbox": [ + 104, + 102, + 506, + 269 + ], + "type": "inline_equation", + "content": "\\mathcal{E}" + }, + { + "bbox": [ + 104, + 102, + 506, + 269 + ], + "type": "text", + "content": ". Given the latent code at step " + }, + { + "bbox": [ + 104, + 102, + 506, + 269 + ], + "type": "inline_equation", + "content": "t - 1" + }, + { + "bbox": [ + 104, + 102, + 506, + 269 + ], + "type": "text", + "content": ", the " + }, + { + "bbox": [ + 104, + 102, + 506, + 269 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 104, + 102, + 506, + 269 + ], + "type": "text", + "content": " procedure is described as " + }, + { + "bbox": [ + 104, + 102, + 506, + 269 + ], + "type": "inline_equation", + "content": "q(x_t | x_{t-1}) = \\mathcal{N}(x_t; \\sqrt{1 - \\beta_t} x_{t-1}, \\beta_t I)" + }, + { + "bbox": [ + 104, + 102, + 506, + 269 + ], + "type": "text", + "content": ". 
Conversely, the backward procedure " + }, + { + "bbox": [ + 104, + 102, + 506, + 269 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 104, + 102, + 506, + 269 + ], + "type": "text", + "content": ", aimed at progressively removing noise, is defined as " + }, + { + "bbox": [ + 104, + 102, + 506, + 269 + ], + "type": "inline_equation", + "content": "p_\\theta(x_{t-1} | x_t) = \\mathcal{N}(\\mu_\\theta(x_t, t), \\Sigma_\\theta(x_t, t))" + }, + { + "bbox": [ + 104, + 102, + 506, + 269 + ], + "type": "text", + "content": ". In practical applications, images are generated under the condition " + }, + { + "bbox": [ + 104, + 102, + 506, + 269 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 104, + 102, + 506, + 269 + ], + "type": "text", + "content": ", by progressively sampling from " + }, + { + "bbox": [ + 104, + 102, + 506, + 269 + ], + "type": "inline_equation", + "content": "x_T" + }, + { + "bbox": [ + 104, + 102, + 506, + 269 + ], + "type": "text", + "content": " down to " + }, + { + "bbox": [ + 104, + 102, + 506, + 269 + ], + "type": "inline_equation", + "content": "x_0" + }, + { + "bbox": [ + 104, + 102, + 506, + 269 + ], + "type": "text", + "content": ". Recently, image-to-video (I2V) generation has been realized (Guo et al., 2024; Dai et al., 2023) by extending the latent code with an additional frame dimension and performing decoding at each frame. 
The denoising procedure is succinctly represented as " + }, + { + "bbox": [ + 104, + 102, + 506, + 269 + ], + "type": "inline_equation", + "content": "x_{t-1} = \\Phi(x_t, I)" + }, + { + "bbox": [ + 104, + 102, + 506, + 269 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 102, + 506, + 269 + ], + "type": "inline_equation", + "content": "x_t, x_{t-1} \\in \\mathbb{R}^{l \\times h \\times w \\times c}" + }, + { + "bbox": [ + 104, + 102, + 506, + 269 + ], + "type": "text", + "content": " represent the sampled latent codes and " + }, + { + "bbox": [ + 104, + 102, + 506, + 269 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 104, + 102, + 506, + 269 + ], + "type": "text", + "content": " the conditioning image." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 280, + 506, + 425 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 280, + 506, + 425 + ], + "spans": [ + { + "bbox": [ + 104, + 280, + 506, + 425 + ], + "type": "text", + "content": "Omnidirectional Panoramic Representation. Panoramic images or videos, denoted as " + }, + { + "bbox": [ + 104, + 280, + 506, + 425 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 104, + 280, + 506, + 425 + ], + "type": "text", + "content": ", are typically represented using equirectangular projections, forming an " + }, + { + "bbox": [ + 104, + 280, + 506, + 425 + ], + "type": "inline_equation", + "content": "H \\times W \\times C" + }, + { + "bbox": [ + 104, + 280, + 506, + 425 + ], + "type": "text", + "content": " matrix, where " + }, + { + "bbox": [ + 104, + 280, + 506, + 425 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 280, + 506, + 425 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 280, + 506, + 425 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 104, + 280, + 506, + 425 + ], + "type": "text", + "content": " denote the image resolution and " + }, + { + "bbox": [ + 104, + 280, + 506, + 425 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 104, + 280, + 506, + 425 + ], + "type": "text", + "content": " represents the number of channels. While this format preserves the matrix structure, making it consistent with planar images captured by conventional cameras, it introduces distortions, especially noticeable near the polar regions of the projection. 
To mitigate these distortions, we adopt a spherical representation for panoramas, where pixel values are defined on a sphere " + }, + { + "bbox": [ + 104, + 280, + 506, + 425 + ], + "type": "inline_equation", + "content": "\\mathbb{S}^2 = \\{\\pmb{d} = (x,y,z)|x,y,z \\in \\mathbb{R} \\land |\\pmb{d}| = 1\\}" + }, + { + "bbox": [ + 104, + 280, + 506, + 425 + ], + "type": "text", + "content": ". For a more precise definition of the projection, we represent matrix-like images using a mapping " + }, + { + "bbox": [ + 104, + 280, + 506, + 425 + ], + "type": "inline_equation", + "content": "\\mathcal{E}_I: [-1,1]^2 \\to \\mathbb{R}^C" + }, + { + "bbox": [ + 104, + 280, + 506, + 425 + ], + "type": "text", + "content": ", which normalizes the image coordinates into the range " + }, + { + "bbox": [ + 104, + 280, + 506, + 425 + ], + "type": "inline_equation", + "content": "[0,1]" + }, + { + "bbox": [ + 104, + 280, + 506, + 425 + ], + "type": "text", + "content": ". Thus, for any given pixel " + }, + { + "bbox": [ + 104, + 280, + 506, + 425 + ], + "type": "inline_equation", + "content": "(x,y) \\in [-1,1]^2" + }, + { + "bbox": [ + 104, + 280, + 506, + 425 + ], + "type": "text", + "content": ", the corresponding pixel value is determined by " + }, + { + "bbox": [ + 104, + 280, + 506, + 425 + ], + "type": "inline_equation", + "content": "\\mathcal{E}_I(x,y)" + }, + { + "bbox": [ + 104, + 280, + 506, + 425 + ], + "type": "text", + "content": ". 
We define the spherical representation of panoramas using the field " + }, + { + "bbox": [ + 104, + 280, + 506, + 425 + ], + "type": "inline_equation", + "content": "\\mathcal{S}_I: \\mathbb{S}^2 \\to \\mathbb{R}^C" + }, + { + "bbox": [ + 104, + 280, + 506, + 425 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 280, + 506, + 425 + ], + "type": "inline_equation", + "content": "\\mathcal{S}_I(\\pmb{d})" + }, + { + "bbox": [ + 104, + 280, + 506, + 425 + ], + "type": "text", + "content": " gives the pixel value at a given direction " + }, + { + "bbox": [ + 104, + 280, + 506, + 425 + ], + "type": "inline_equation", + "content": "\\pmb{d} = (x,y,z)" + }, + { + "bbox": [ + 104, + 280, + 506, + 425 + ], + "type": "text", + "content": ". The relationship between the spherical and equirectangular representations is established through the following projection formula:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 196, + 430, + 505, + 456 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 196, + 430, + 505, + 456 + ], + "spans": [ + { + "bbox": [ + 196, + 430, + 505, + 456 + ], + "type": "interline_equation", + "content": "\\mathcal {S} _ {I} (x, y, z) = \\mathcal {E} _ {I} \\left(\\frac {1}{\\pi} \\operatorname {a r c c o s} \\frac {y}{\\sqrt {1 - z ^ {2}}}, \\frac {2}{\\pi} \\arcsin z\\right). \\tag {1}", + "image_path": "a6bff777ed11d4331d672f6633ff900a8840262ec3a216f70ae2f9b4a94d2a64.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 461, + 504, + 527 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 461, + 504, + 527 + ], + "spans": [ + { + "bbox": [ + 104, + 461, + 504, + 527 + ], + "type": "text", + "content": "For perspective images, we define a virtual camera centered at the origin. 
The rays for each pixel are determined through ray casting, as described in (Mildenhall et al., 2020), where each ray " + }, + { + "bbox": [ + 104, + 461, + 504, + 527 + ], + "type": "inline_equation", + "content": "\\pmb{d}" + }, + { + "bbox": [ + 104, + 461, + 504, + 527 + ], + "type": "text", + "content": " is represented by " + }, + { + "bbox": [ + 104, + 461, + 504, + 527 + ], + "type": "inline_equation", + "content": "r(x,y,f,\\pmb{u},\\pmb{s},R)" + }, + { + "bbox": [ + 104, + 461, + 504, + 527 + ], + "type": "text", + "content": ". This representation takes into account the focal length " + }, + { + "bbox": [ + 104, + 461, + 504, + 527 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 461, + 504, + 527 + ], + "type": "text", + "content": ", the z-axis direction " + }, + { + "bbox": [ + 104, + 461, + 504, + 527 + ], + "type": "inline_equation", + "content": "\\pmb{u}" + }, + { + "bbox": [ + 104, + 461, + 504, + 527 + ], + "type": "text", + "content": ", the image plane size " + }, + { + "bbox": [ + 104, + 461, + 504, + 527 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 104, + 461, + 504, + 527 + ], + "type": "text", + "content": ", and the camera's rotation along the z-axis " + }, + { + "bbox": [ + 104, + 461, + 504, + 527 + ], + "type": "inline_equation", + "content": "R" + }, + { + "bbox": [ + 104, + 461, + 504, + 527 + ], + "type": "text", + "content": ". 
Consequently, for a given panorama " + }, + { + "bbox": [ + 104, + 461, + 504, + 527 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 104, + 461, + 504, + 527 + ], + "type": "text", + "content": ", the perspective image " + }, + { + "bbox": [ + 104, + 461, + 504, + 527 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 104, + 461, + 504, + 527 + ], + "type": "text", + "content": " can be projected using these camera parameters " + }, + { + "bbox": [ + 104, + 461, + 504, + 527 + ], + "type": "inline_equation", + "content": "(f,\\pmb{u},\\pmb{s},R)" + }, + { + "bbox": [ + 104, + 461, + 504, + 527 + ], + "type": "text", + "content": " as:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 228, + 528, + 504, + 540 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 228, + 528, + 504, + 540 + ], + "spans": [ + { + "bbox": [ + 228, + 528, + 504, + 540 + ], + "type": "interline_equation", + "content": "\\mathcal {E} _ {P} (x, y) = \\mathcal {S} _ {I} \\circ \\boldsymbol {r} (x, y, f, \\boldsymbol {u}, \\boldsymbol {s}, R). 
\\tag {2}", + "image_path": "89b5f2b8778f2f8704013786983f1ba19e8c630eec63847df74f8b951b7b2c66.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 543, + 504, + 577 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 543, + 504, + 577 + ], + "spans": [ + { + "bbox": [ + 104, + 543, + 504, + 577 + ], + "type": "text", + "content": "In this paper, we fix the focal length " + }, + { + "bbox": [ + 104, + 543, + 504, + 577 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 543, + 504, + 577 + ], + "type": "text", + "content": ", the image plane size " + }, + { + "bbox": [ + 104, + 543, + 504, + 577 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 104, + 543, + 504, + 577 + ], + "type": "text", + "content": ", and the rotation " + }, + { + "bbox": [ + 104, + 543, + 504, + 577 + ], + "type": "inline_equation", + "content": "R" + }, + { + "bbox": [ + 104, + 543, + 504, + 577 + ], + "type": "text", + "content": ". 
We denote the process of projecting the panorama " + }, + { + "bbox": [ + 104, + 543, + 504, + 577 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 104, + 543, + 504, + 577 + ], + "type": "text", + "content": " into a perspective image " + }, + { + "bbox": [ + 104, + 543, + 504, + 577 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 104, + 543, + 504, + 577 + ], + "type": "text", + "content": ", based on the camera's " + }, + { + "bbox": [ + 104, + 543, + 504, + 577 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 104, + 543, + 504, + 577 + ], + "type": "text", + "content": "-axis direction " + }, + { + "bbox": [ + 104, + 543, + 504, + 577 + ], + "type": "inline_equation", + "content": "\\mathbf{u}" + }, + { + "bbox": [ + 104, + 543, + 504, + 577 + ], + "type": "text", + "content": ", as " + }, + { + "bbox": [ + 104, + 543, + 504, + 577 + ], + "type": "inline_equation", + "content": "i = \\gamma(I, \\mathbf{u})" + }, + { + "bbox": [ + 104, + 543, + 504, + 577 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 590, + 310, + 601 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 590, + 310, + 601 + ], + "spans": [ + { + "bbox": [ + 105, + 590, + 310, + 601 + ], + "type": "text", + "content": "3.2 INCONSISTENT PERSPECTIVE ANIMATION" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 611, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 611, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 611, + 506, + 733 + ], + "type": "text", + "content": "Large-scale pre-trained 2D models have shown remarkable generative capabilities in creating images and videos, benefiting from vast multi-modal training data gathered from the Internet. 
However, acquiring high-quality 4D training data is considerably more challenging, and no current 4D dataset reaches the scale of those available for images and videos. Therefore, our approach aims to utilize the capabilities of video generative models to produce consistent panoramic " + }, + { + "bbox": [ + 104, + 611, + 506, + 733 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 104, + 611, + 506, + 733 + ], + "type": "text", + "content": " videos, which are then elevated to 4D. Nonetheless, the availability of panoramic videos is significantly more limited compared to planar perspective videos. Consequently, mainstream image-to-video (I2V) animation techniques may not perform optimally for panoramic formats, and the resolution of the videos remains constrained, as illustrated in Fig. 5 (b) and Tab. 2. Alternatively, the animator can be applied to perspective images, but this introduces inconsistencies across different projected views, as depicted in Fig. 
5 (c)" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 83, + 296, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 83, + 296, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 83, + 296, + 94 + ], + "type": "text", + "content": "3.3 CONSISTENT PANORAMIC ANIMATION" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 103, + 506, + 193 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 103, + 506, + 193 + ], + "spans": [ + { + "bbox": [ + 104, + 103, + 506, + 193 + ], + "type": "text", + "content": "Limited by the scarcity of 4D training data in panoramic format, and given that large diffusion models are primarily trained on planar perspective videos, directly applying 2D perspective denoisers presents challenges in generating seamless panoramic videos with proper equirectangular projection, due to inconsistent motion across different views and the domain gap between spherical and perspective spaces. This constraint has driven us to develop a panoramic video generator in spherical space that leverages priors from general image-to-video (I2V) animation techniques, as shown in Fig. 2. 
Consequently, starting from a static input panorama, we animate it into a panoramic video, as demonstrated in the \"Animating Phase\" section of Fig. 3." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 205, + 506, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 205, + 506, + 350 + ], + "spans": [ + { + "bbox": [ + 104, + 205, + 506, + 350 + ], + "type": "text", + "content": "Spherical Latent Space. To generate panoramic video from a static panorama, we build up the denoise-in-latent-space schema (An et al., 2023; Blattmann et al., 2023a; Dai et al., 2023) in a spherical context. For general video generation, a noisy latent sample is progressively denoised using DDPM (Ho et al., 2020), conditioned on a static input image, and subsequently decoded into a video sequence by a pre-trained VAE decoder. However, in 4K4DGen, unlike the method for generating perspective planar videos, both the latent code and the static panorama input are represented on spheres. We start with the initial panoramic latent code " + }, + { + "bbox": [ + 104, + 205, + 506, + 350 + ], + "type": "inline_equation", + "content": "S^T: \\mathbb{S}^2 \\to \\mathbb{R}^{L \\times c}" + }, + { + "bbox": [ + 104, + 205, + 506, + 350 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 205, + 506, + 350 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 104, + 205, + 506, + 350 + ], + "type": "text", + "content": " denotes the number of video frames and " + }, + { + "bbox": [ + 104, + 205, + 506, + 350 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 104, + 205, + 506, + 350 + ], + "type": "text", + "content": " the channels per frame. 
A novel Panoramic Denoiser is then applied to generate the clean panoramic latent code " + }, + { + "bbox": [ + 104, + 205, + 506, + 350 + ], + "type": "inline_equation", + "content": "S^0" + }, + { + "bbox": [ + 104, + 205, + 506, + 350 + ], + "type": "text", + "content": ", conditioned on the static input panorama " + }, + { + "bbox": [ + 104, + 205, + 506, + 350 + ], + "type": "inline_equation", + "content": "I \\in \\mathbb{R}^{H \\times W}" + }, + { + "bbox": [ + 104, + 205, + 506, + 350 + ], + "type": "text", + "content": ". Subsequently, the equirectangular projection, as introduced in Sec. 3.1, projects the clean panoramic latent code into the matrix-like latent code " + }, + { + "bbox": [ + 104, + 205, + 506, + 350 + ], + "type": "inline_equation", + "content": "Z^0 \\in \\mathbb{R}^{h \\times w \\times L \\times c}" + }, + { + "bbox": [ + 104, + 205, + 506, + 350 + ], + "type": "text", + "content": ", with " + }, + { + "bbox": [ + 104, + 205, + 506, + 350 + ], + "type": "inline_equation", + "content": "h" + }, + { + "bbox": [ + 104, + 205, + 506, + 350 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 205, + 506, + 350 + ], + "type": "inline_equation", + "content": "w" + }, + { + "bbox": [ + 104, + 205, + 506, + 350 + ], + "type": "text", + "content": " representing the resolution of the latent code. 
Each " + }, + { + "bbox": [ + 104, + 205, + 506, + 350 + ], + "type": "inline_equation", + "content": "k^{\mathrm{th}}" + }, + { + "bbox": [ + 104, + 205, + 506, + 350 + ], + "type": "text", + "content": " video frame " + }, + { + "bbox": [ + 104, + 205, + 506, + 350 + ], + "type": "inline_equation", + "content": "I^k" + }, + { + "bbox": [ + 104, + 205, + 506, + 350 + ], + "type": "text", + "content": " in pixel space is decoded by the pre-trained VAE decoder as " + }, + { + "bbox": [ + 104, + 205, + 506, + 350 + ], + "type": "inline_equation", + "content": "I^k = \mathcal{D}(Z^0[:,:,k,:])" + }, + { + "bbox": [ + 104, + 205, + 506, + 350 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 361, + 506, + 484 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 361, + 506, + 484 + ], + "spans": [ + { + "bbox": [ + 104, + 361, + 506, + 484 + ], + "type": "text", + "content": "Build the Panoramic Denoiser. We leverage a pre-trained perspective video generative model (Dai et al., 2023) to build our Panoramic Denoiser. 
This video generator takes a perspective image " + }, + { + "bbox": [ + 104, + 361, + 506, + 484 + ], + "type": "inline_equation", + "content": "i \\in \\mathbb{R}^{p_H \\times p_W \\times c}" + }, + { + "bbox": [ + 104, + 361, + 506, + 484 + ], + "type": "text", + "content": " and an initial latent code " + }, + { + "bbox": [ + 104, + 361, + 506, + 484 + ], + "type": "inline_equation", + "content": "z^T \\in \\mathbb{R}^{p_h \\times p_w \\times (L \\times c)}" + }, + { + "bbox": [ + 104, + 361, + 506, + 484 + ], + "type": "text", + "content": " as inputs, progressively denoising the latent code " + }, + { + "bbox": [ + 104, + 361, + 506, + 484 + ], + "type": "inline_equation", + "content": "z^T" + }, + { + "bbox": [ + 104, + 361, + 506, + 484 + ], + "type": "text", + "content": " to a clean state " + }, + { + "bbox": [ + 104, + 361, + 506, + 484 + ], + "type": "inline_equation", + "content": "z^0" + }, + { + "bbox": [ + 104, + 361, + 506, + 484 + ], + "type": "text", + "content": " through a denoising function " + }, + { + "bbox": [ + 104, + 361, + 506, + 484 + ], + "type": "inline_equation", + "content": "z^{t-1} = \\Phi(z^t, i)" + }, + { + "bbox": [ + 104, + 361, + 506, + 484 + ], + "type": "text", + "content": ". 
Here, " + }, + { + "bbox": [ + 104, + 361, + 506, + 484 + ], + "type": "inline_equation", + "content": "p_h" + }, + { + "bbox": [ + 104, + 361, + 506, + 484 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 361, + 506, + 484 + ], + "type": "inline_equation", + "content": "p_w" + }, + { + "bbox": [ + 104, + 361, + 506, + 484 + ], + "type": "text", + "content": " represent the resolution of the latent code, " + }, + { + "bbox": [ + 104, + 361, + 506, + 484 + ], + "type": "inline_equation", + "content": "p_H" + }, + { + "bbox": [ + 104, + 361, + 506, + 484 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 361, + 506, + 484 + ], + "type": "inline_equation", + "content": "p_W" + }, + { + "bbox": [ + 104, + 361, + 506, + 484 + ], + "type": "text", + "content": " the resolution of the conditioning image, " + }, + { + "bbox": [ + 104, + 361, + 506, + 484 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 104, + 361, + 506, + 484 + ], + "type": "text", + "content": " the number of channels, and " + }, + { + "bbox": [ + 104, + 361, + 506, + 484 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 104, + 361, + 506, + 484 + ], + "type": "text", + "content": " the video length. Our goal is to transform the initial noisy panoramic latent code " + }, + { + "bbox": [ + 104, + 361, + 506, + 484 + ], + "type": "inline_equation", + "content": "S^T" + }, + { + "bbox": [ + 104, + 361, + 506, + 484 + ], + "type": "text", + "content": " into the clean state " + }, + { + "bbox": [ + 104, + 361, + 506, + 484 + ], + "type": "inline_equation", + "content": "S^0" + }, + { + "bbox": [ + 104, + 361, + 506, + 484 + ], + "type": "text", + "content": ", ensuring that each perspective view is appropriately animated while maintaining global consistency. 
The underlying intuition is that if each perspective view undergoes its respective denoising process, the perspective video will feature meaningful animation. Moreover, if two perspective views overlap, they will align with each other (Jiménez, 2023; Bar-Tal et al., 2023; Lugmayr et al., 2022) to produce a seamless global animation." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 489, + 506, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 489, + 506, + 604 + ], + "spans": [ + { + "bbox": [ + 104, + 489, + 506, + 604 + ], + "type": "text", + "content": "Given a static input panorama " + }, + { + "bbox": [ + 104, + 489, + 506, + 604 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 104, + 489, + 506, + 604 + ], + "type": "text", + "content": " and an initial spherical latent code " + }, + { + "bbox": [ + 104, + 489, + 506, + 604 + ], + "type": "inline_equation", + "content": "S^0:\\mathbb{S}^2\\to \\mathbb{R}^{L\\times c}" + }, + { + "bbox": [ + 104, + 489, + 506, + 604 + ], + "type": "text", + "content": ", we progressively remove noise employing a project-and-fuse procedure at each denoising step. 
Specifically, the spherical latent code at the " + }, + { + "bbox": [ + 104, + 489, + 506, + 604 + ], + "type": "inline_equation", + "content": "t^{\\mathrm{th}}" + }, + { + "bbox": [ + 104, + 489, + 506, + 604 + ], + "type": "text", + "content": " denoising step, " + }, + { + "bbox": [ + 104, + 489, + 506, + 604 + ], + "type": "inline_equation", + "content": "S^t:\\mathbb{S}^2\\to \\mathbb{R}^{L\\times c}" + }, + { + "bbox": [ + 104, + 489, + 506, + 604 + ], + "type": "text", + "content": ", is projected into multiple perspective latent codes " + }, + { + "bbox": [ + 104, + 489, + 506, + 604 + ], + "type": "inline_equation", + "content": "\\mathcal{Z}^t = \\{z_1^t,z_2^t,\\ldots ,z_n^t\\}" + }, + { + "bbox": [ + 104, + 489, + 506, + 604 + ], + "type": "text", + "content": ", where each " + }, + { + "bbox": [ + 104, + 489, + 506, + 604 + ], + "type": "inline_equation", + "content": "z_k^t = \\gamma (S^t,d_k)\\in \\mathbb{R}^{p_h\\times p_w\\times (L\\times c)}" + }, + { + "bbox": [ + 104, + 489, + 506, + 604 + ], + "type": "text", + "content": " represents the " + }, + { + "bbox": [ + 104, + 489, + 506, + 604 + ], + "type": "inline_equation", + "content": "k^{\\mathrm{th}}" + }, + { + "bbox": [ + 104, + 489, + 506, + 604 + ], + "type": "text", + "content": " perspective latent code projected in the equirectangular format detailed in Sec. 3.1. 
Each perspective latent code is then denoised by one step using a pre-trained perspective denoiser, denoted as " + }, + { + "bbox": [ + 104, + 489, + 506, + 604 + ], + "type": "inline_equation", + "content": "z_k^{t - 1} = \\Phi (z_k^t,i_k)" + }, + { + "bbox": [ + 104, + 489, + 506, + 604 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 489, + 506, + 604 + ], + "type": "inline_equation", + "content": "i_k = \\gamma (I,d_k)\\in \\mathbb{R}^{p_H\\times p_W\\times c}" + }, + { + "bbox": [ + 104, + 489, + 506, + 604 + ], + "type": "text", + "content": " is the perspective conditioning image projected from the panorama " + }, + { + "bbox": [ + 104, + 489, + 506, + 604 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 104, + 489, + 506, + 604 + ], + "type": "text", + "content": ". Subsequently, we optimize the spherical latent code " + }, + { + "bbox": [ + 104, + 489, + 506, + 604 + ], + "type": "inline_equation", + "content": "S^{t - 1}:\\mathbb{S}^2\\to \\mathbb{R}^{L\\times c}" + }, + { + "bbox": [ + 104, + 489, + 506, + 604 + ], + "type": "text", + "content": " at step " + }, + { + "bbox": [ + 104, + 489, + 506, + 604 + ], + "type": "inline_equation", + "content": "t - 1" + }, + { + "bbox": [ + 104, + 489, + 506, + 604 + ], + "type": "text", + "content": " by fusing all the denoised perspective latent codes " + }, + { + "bbox": [ + 104, + 489, + 506, + 604 + ], + "type": "inline_equation", + "content": "z_{k}^{t - 1}" + }, + { + "bbox": [ + 104, + 489, + 506, + 604 + ], + "type": "text", + "content": ". 
Formally, the denoising procedure at step " + }, + { + "bbox": [ + 104, + 489, + 506, + 604 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 489, + 506, + 604 + ], + "type": "text", + "content": ", denoted as " + }, + { + "bbox": [ + 104, + 489, + 506, + 604 + ], + "type": "inline_equation", + "content": "S^{t - 1} = \\Psi (S^t,I)" + }, + { + "bbox": [ + 104, + 489, + 506, + 604 + ], + "type": "text", + "content": ", encompasses the following operations:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 177, + 610, + 504, + 630 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 177, + 610, + 504, + 630 + ], + "spans": [ + { + "bbox": [ + 177, + 610, + 504, + 630 + ], + "type": "interline_equation", + "content": "\\Psi \\left(\\mathcal {S} ^ {t}, I\\right) = \\underset {\\mathcal {S}} {\\operatorname {a r g m i n}} \\mathbb {E} _ {\\boldsymbol {d} \\in \\mathbb {S} ^ {2}} \\| \\gamma (\\mathcal {S}, \\boldsymbol {d}) - \\Phi \\left(\\gamma (\\mathcal {S} ^ {t}, \\boldsymbol {d}), \\gamma (I, \\boldsymbol {d})\\right) \\|. 
\\tag {3}", + "image_path": "5cedfdef13082b7ea0f375b22d88c264e20b7ab9fa0372a0ac0c0aefde2fa1ca.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 645, + 268, + 656 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 645, + 268, + 656 + ], + "spans": [ + { + "bbox": [ + 105, + 645, + 268, + 656 + ], + "type": "text", + "content": "3.4 DYNAMIC PANORAMIC LIFTING" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 665, + 505, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 665, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 665, + 505, + 733 + ], + "type": "text", + "content": "We define the panoramic video as " + }, + { + "bbox": [ + 104, + 665, + 505, + 733 + ], + "type": "inline_equation", + "content": "V = \\{I^1, I^2, \\dots, I^L\\}" + }, + { + "bbox": [ + 104, + 665, + 505, + 733 + ], + "type": "text", + "content": ", consisting of " + }, + { + "bbox": [ + 104, + 665, + 505, + 733 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 104, + 665, + 505, + 733 + ], + "type": "text", + "content": " frames. The video is divided into overlapping perspective videos " + }, + { + "bbox": [ + 104, + 665, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\{v_0, v_1, \\dots, v_n\\}" + }, + { + "bbox": [ + 104, + 665, + 505, + 733 + ], + "type": "text", + "content": ", each captured from specific camera directions " + }, + { + "bbox": [ + 104, + 665, + 505, + 733 + ], + "type": "inline_equation", + "content": "\\{d_1, \\dots, d_n\\}" + }, + { + "bbox": [ + 104, + 665, + 505, + 733 + ], + "type": "text", + "content": ", collectively encompassing the entire span of the panoramic video " + }, + { + "bbox": [ + 104, + 665, + 505, + 733 + ], + "type": "inline_equation", + "content": "V" + }, + { + "bbox": [ + 104, + 665, + 505, + 733 + ], + "type": "text", + "content": ". 
Subsequently, we estimate the geometry of the 4D scene by fusing the depth maps through spatial-temporal geometry alignment. Following this, we describe our methodology for 4D representation and the subsequent rendering procedure." + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "text", + "content": "Supervision from Spatial-Temporal Geometry Alignment. To transition from 2D video to 3D space, we utilize a monocular depth estimator (Ranftl et al., 2021), inspired by advancements in (Zhou et al., 2024b), to estimate the scene's geometric structure. Nonetheless, depth maps generated for each frame and perspective might lack spatial and temporal consistency. To address this, we implement Spatial-Temporal Geometry Alignment using a pre-trained depth estimator " + }, + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "inline_equation", + "content": "\\Theta : \\mathbb{R}^{h \\times w \\times 3} \\to \\mathbb{R}^{h \\times w}" + }, + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "text", + "content": ", applied to perspective images. 
Our objective is to amalgamate " + }, + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "text", + "content": " perspective depth maps " + }, + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "inline_equation", + "content": "D_{i}^{k} = \\Theta (\\gamma(I^{k}, d_{i}))" + }, + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "text", + "content": " into a cohesive panoramic depth map " + }, + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "inline_equation", + "content": "D^{k}" + }, + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "text", + "content": " for each frame " + }, + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "inline_equation", + "content": "I^{k}" + }, + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "text", + "content": ", ensuring spatial and temporal continuity. We express these depth maps as a spherical representation " + }, + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "inline_equation", + "content": "S_{D}^{1}, \\ldots, S_{D}^{L}" + }, + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "text", + "content": ". For enhanced optimization, we assign " + }, + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "text", + "content": " scale factors " + }, + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "inline_equation", + "content": "\\alpha_{i}^{k} \\in \\mathbb{R}" + }, + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "text", + "content": " and shifting parameters " + }, + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "inline_equation", + "content": "\\beta_{i}^{k} \\in \\mathbb{R}^{h \\times w}" + }, + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "text", + "content": " to each perspective depth map. 
The comprehensive depth map " + }, + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "inline_equation", + "content": "D^{k}" + }, + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "text", + "content": " is then optimized jointly with these parameters " + }, + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 104, + 82, + 506, + 205 + ], + "type": "text", + "content": ". The formal objective is structured as follows:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 168, + 213, + 505, + 233 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 168, + 213, + 505, + 233 + ], + "spans": [ + { + "bbox": [ + 168, + 213, + 505, + 233 + ], + "type": "interline_equation", + "content": "\\mathcal {S} _ {D} ^ {k} = \\underset {\\mathcal {S}} {\\operatorname {a r g m i n}} \\underset {i \\in \\{1, \\dots n \\}} {\\mathbb {E}} \\lambda_ {\\text {d e p t h}} \\mathcal {L} _ {\\text {d e p t h}} + \\lambda_ {\\text {s c a l e}} \\mathcal {L} _ {\\text {s c a l e}} + \\lambda_ {\\text {s h i f t}} \\mathcal {L} _ {\\text {s h i f t}}. 
\\tag {4}", + "image_path": "dca2d4c00edcc4f30eab096403499bfdb85e8bf68445016a9664d8ec1fdcbcc3.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 242, + 504, + 281 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 242, + 504, + 281 + ], + "spans": [ + { + "bbox": [ + 104, + 242, + 504, + 281 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 242, + 504, + 281 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{depth}} = \\|\\mathrm{softplus}(\\alpha_i^k)\\Theta(\\gamma(I^k, d_i)) - \\gamma(\\mathcal{S}) + \\beta_i^k\\|" + }, + { + "bbox": [ + 104, + 242, + 504, + 281 + ], + "type": "text", + "content": " is the depth supervision term, " + }, + { + "bbox": [ + 104, + 242, + 504, + 281 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{scale}} = \\|\\alpha_i^k - \\alpha_i^{k-1}\\| + \\|\\mathrm{softplus}(\\alpha_i^k) - 1\\|" + }, + { + "bbox": [ + 104, + 242, + 504, + 281 + ], + "type": "text", + "content": " the regularization term for " + }, + { + "bbox": [ + 104, + 242, + 504, + 281 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 104, + 242, + 504, + 281 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 242, + 504, + 281 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{shift}} = \\mathcal{L}_{\\mathrm{TV}}(\\beta_i^k) + \\|\\beta_i^k - \\beta_i^{k-1}\\|" + }, + { + "bbox": [ + 104, + 242, + 504, + 281 + ], + "type": "text", + "content": " the regularization term for " + }, + { + "bbox": [ + 104, + 242, + 504, + 281 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 104, + 242, + 504, + 281 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 104, + 242, + 504, + 281 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{TV}}" + }, + { + "bbox": [ + 104, + 242, + 504, + 281 + ], + "type": "text", + "content": " is the TV regularization." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 293, + 506, + 403 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 293, + 506, + 403 + ], + "spans": [ + { + "bbox": [ + 104, + 293, + 506, + 403 + ], + "type": "text", + "content": "4D Representation and Rendering. We represent and render the dynamic scene using " + }, + { + "bbox": [ + 104, + 293, + 506, + 403 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 104, + 293, + 506, + 403 + ], + "type": "text", + "content": " sets of 3D Gaussians. Each set, corresponding to a specific timestamp " + }, + { + "bbox": [ + 104, + 293, + 506, + 403 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 293, + 506, + 403 + ], + "type": "text", + "content": ", is denoted as " + }, + { + "bbox": [ + 104, + 293, + 506, + 403 + ], + "type": "inline_equation", + "content": "G_{t} = \\{(p_{t}^{i},q_{t}^{i},s_{t}^{i},c_{t}^{i},o_{t}^{i}) | i = 1,\\dots ,n\\}" + }, + { + "bbox": [ + 104, + 293, + 506, + 403 + ], + "type": "text", + "content": ". This definition aligns with the methods described in (Bahmani et al., 2024), which also provides a fast rasterizer for rendering images based on these Gaussian sets and given camera parameters. Consistent with Sec. 3.1, while the camera intrinsics remain fixed, we parameterize the camera extrinsics through a position " + }, + { + "bbox": [ + 104, + 293, + 506, + 403 + ], + "type": "inline_equation", + "content": "\\pmb{p} \\in \\mathbb{R}^3" + }, + { + "bbox": [ + 104, + 293, + 506, + 403 + ], + "type": "text", + "content": " and an orientation " + }, + { + "bbox": [ + 104, + 293, + 506, + 403 + ], + "type": "inline_equation", + "content": "\\pmb{d} \\in \\mathbb{S}^2" + }, + { + "bbox": [ + 104, + 293, + 506, + 403 + ], + "type": "text", + "content": ". The training process is structured in two stages: initially, we directly supervise the 3D Gaussians using the panoramic videos. 
Let " + }, + { + "bbox": [ + 104, + 293, + 506, + 403 + ], + "type": "inline_equation", + "content": "\\mathcal{R}(G,\\pmb{p},\\pmb{d})" + }, + { + "bbox": [ + 104, + 293, + 506, + 403 + ], + "type": "text", + "content": " represent the rasterized image from Gaussian set " + }, + { + "bbox": [ + 104, + 293, + 506, + 403 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 104, + 293, + 506, + 403 + ], + "type": "text", + "content": ", utilizing camera extrinsics " + }, + { + "bbox": [ + 104, + 293, + 506, + 403 + ], + "type": "inline_equation", + "content": "\\pmb{p} = 0" + }, + { + "bbox": [ + 104, + 293, + 506, + 403 + ], + "type": "text", + "content": " and camera direction " + }, + { + "bbox": [ + 104, + 293, + 506, + 403 + ], + "type": "inline_equation", + "content": "\\pmb{d}" + }, + { + "bbox": [ + 104, + 293, + 506, + 403 + ], + "type": "text", + "content": ". Let " + }, + { + "bbox": [ + 104, + 293, + 506, + 403 + ], + "type": "inline_equation", + "content": "I_{t}" + }, + { + "bbox": [ + 104, + 293, + 506, + 403 + ], + "type": "text", + "content": " denote the " + }, + { + "bbox": [ + 104, + 293, + 506, + 403 + ], + "type": "inline_equation", + "content": "t^{\\mathrm{th}}" + }, + { + "bbox": [ + 104, + 293, + 506, + 403 + ], + "type": "text", + "content": " frame of the panoramic video. 
We optimize the " + }, + { + "bbox": [ + 104, + 293, + 506, + 403 + ], + "type": "inline_equation", + "content": "t^{\\mathrm{th}}" + }, + { + "bbox": [ + 104, + 293, + 506, + 403 + ], + "type": "text", + "content": " Gaussian set " + }, + { + "bbox": [ + 104, + 293, + 506, + 403 + ], + "type": "inline_equation", + "content": "G_{t}" + }, + { + "bbox": [ + 104, + 293, + 506, + 403 + ], + "type": "text", + "content": " using the following objective:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 179, + 411, + 505, + 424 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 179, + 411, + 505, + 424 + ], + "spans": [ + { + "bbox": [ + 179, + 411, + 505, + 424 + ], + "type": "interline_equation", + "content": "\\mathcal {L} = \\lambda_ {\\mathrm {r g b}} \\mathcal {L} _ {\\mathrm {r g b}} + \\lambda_ {\\text {t e m p o r a l}} \\mathcal {L} _ {\\text {t e m p o r a l}} + \\lambda_ {\\text {s e m}} \\mathcal {L} _ {\\text {s e m}} + \\lambda_ {\\text {g e o}} \\mathcal {L} _ {\\text {g e o}} \\tag {5}", + "image_path": "766f92d40a1fac289a57e9586fe766845d6c22a430a869b5a392647daa315226.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 431, + 506, + 455 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 431, + 506, + 455 + ], + "spans": [ + { + "bbox": [ + 104, + 431, + 506, + 455 + ], + "type": "text", + "content": "where the RGB supervision term " + }, + { + "bbox": [ + 104, + 431, + 506, + 455 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{rgb}} = \\lambda \\mathcal{L}_1 + (1 - \\lambda)\\mathcal{L}_{\\mathrm{SSIM}}" + }, + { + "bbox": [ + 104, + 431, + 506, + 455 + ], + "type": "text", + "content": " is the same as 3D-GS (Kerbl et al., 2023), and the temporal regularize term " + }, + { + "bbox": [ + 104, + 431, + 506, + 455 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{temporal}}" + }, + { + "bbox": [ + 104, + 431, + 506, + 455 + ], + "type": 
"text", + "content": " written as:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 199, + 471, + 505, + 502 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 199, + 471, + 505, + 502 + ], + "spans": [ + { + "bbox": [ + 199, + 471, + 505, + 502 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {t e m p o r a l}} = \\sum_ {i = 1} ^ {n} \\| \\mathcal {R} \\left(G _ {t}, \\mathbf {0}, \\boldsymbol {d} _ {i}\\right) - \\mathcal {R} \\left(G _ {t - 1}, \\mathbf {0}, \\boldsymbol {d} _ {i}\\right) \\| \\tag {6}", + "image_path": "b7705b74b221723ce69105cb70c181519dc1c51b1ae6c46a15b7be02350cf341.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 513, + 506, + 592 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 513, + 506, + 592 + ], + "spans": [ + { + "bbox": [ + 104, + 513, + 506, + 592 + ], + "type": "text", + "content": "Then, we adopt the distillation loss and geometric regularization used in (Zhou et al., 2024b), the distillation loss is defined as follows: " + }, + { + "bbox": [ + 104, + 513, + 506, + 592 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{sem} = 1 - \\cos \\langle \\mathrm{CLS}(\\mathcal{R}(G_t,\\mathbf{0},\\pmb {d}_i)),\\mathrm{CLS}(\\mathcal{R}(G_t,\\pmb {\\delta}_p,\\pmb {d}_i))\\rangle" + }, + { + "bbox": [ + 104, + 513, + 506, + 592 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 104, + 513, + 506, + 592 + ], + "type": "inline_equation", + "content": "\\delta_p\\in [-\\alpha ,\\alpha ]^3" + }, + { + "bbox": [ + 104, + 513, + 506, + 592 + ], + "type": "text", + "content": " is the disturbing vector, " + }, + { + "bbox": [ + 104, + 513, + 506, + 592 + ], + "type": "inline_equation", + "content": "\\mathrm{CLS}(\\cdot)" + }, + { + "bbox": [ + 104, + 513, + 506, + 592 + ], + "type": "text", + "content": " the feature extractor such as DINO (Oquab et al., 2023), and " + }, + { + "bbox": [ + 104, + 513, + 506, + 592 + 
], + "type": "inline_equation", + "content": "\\cos \\langle \\cdot ,\\cdot \\rangle" + }, + { + "bbox": [ + 104, + 513, + 506, + 592 + ], + "type": "text", + "content": " the cosine similarity of two vectors. The geometric regularization is defined as follows: " + }, + { + "bbox": [ + 104, + 513, + 506, + 592 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{geo} = 1 - \\frac{\\mathrm{Cov}(\\mathcal{R}_D(G_t,\\mathbf{0},\\pmb{d}_i),\\Theta(\\gamma(I,\\pmb {d}_i)))}{\\sqrt{\\mathrm{Var}(\\mathcal{R}_D(G_t,\\mathbf{0},\\pmb{d}_i))\\mathrm{Var}(\\Theta(\\gamma(I,\\pmb {d}_i)))}}" + }, + { + "bbox": [ + 104, + 513, + 506, + 592 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 513, + 506, + 592 + ], + "type": "inline_equation", + "content": "\\mathcal{R}_D" + }, + { + "bbox": [ + 104, + 513, + 506, + 592 + ], + "type": "text", + "content": " is the rendered depth, " + }, + { + "bbox": [ + 104, + 513, + 506, + 592 + ], + "type": "inline_equation", + "content": "\\mathrm{Cov}(\\cdot ,\\cdot)" + }, + { + "bbox": [ + 104, + 513, + 506, + 592 + ], + "type": "text", + "content": " the covariance, and " + }, + { + "bbox": [ + 104, + 513, + 506, + 592 + ], + "type": "inline_equation", + "content": "\\mathrm{Var}(\\cdot)" + }, + { + "bbox": [ + 104, + 513, + 506, + 592 + ], + "type": "text", + "content": " the variance." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 608, + 201, + 619 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 608, + 201, + 619 + ], + "spans": [ + { + "bbox": [ + 105, + 608, + 201, + 619 + ], + "type": "text", + "content": "4 EXPERIMENTS" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 625, + 246, + 636 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 625, + 246, + 636 + ], + "spans": [ + { + "bbox": [ + 105, + 625, + 246, + 636 + ], + "type": "text", + "content": "4.1 EXPERIMENTAL SETTINGS" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 641, + 506, + 740 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 641, + 506, + 740 + ], + "spans": [ + { + "bbox": [ + 104, + 641, + 506, + 740 + ], + "type": "text", + "content": "Implementation Details. For perspective images, we uniformly select 20 directions " + }, + { + "bbox": [ + 104, + 641, + 506, + 740 + ], + "type": "inline_equation", + "content": "\\mathbf{u}" + }, + { + "bbox": [ + 104, + 641, + 506, + 740 + ], + "type": "text", + "content": " on the sphere " + }, + { + "bbox": [ + 104, + 641, + 506, + 740 + ], + "type": "inline_equation", + "content": "\\mathbb{S}^2" + }, + { + "bbox": [ + 104, + 641, + 506, + 740 + ], + "type": "text", + "content": " as the z-axis of 20 cameras. 
In each experiment, the image plane size " + }, + { + "bbox": [ + 104, + 641, + 506, + 740 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 104, + 641, + 506, + 740 + ], + "type": "text", + "content": " is set at " + }, + { + "bbox": [ + 104, + 641, + 506, + 740 + ], + "type": "inline_equation", + "content": "0.6\\times 0.6" + }, + { + "bbox": [ + 104, + 641, + 506, + 740 + ], + "type": "text", + "content": ", with a focal length " + }, + { + "bbox": [ + 104, + 641, + 506, + 740 + ], + "type": "inline_equation", + "content": "f = 0.6" + }, + { + "bbox": [ + 104, + 641, + 506, + 740 + ], + "type": "text", + "content": " and a resolution of " + }, + { + "bbox": [ + 104, + 641, + 506, + 740 + ], + "type": "inline_equation", + "content": "512\\times 512" + }, + { + "bbox": [ + 104, + 641, + 506, + 740 + ], + "type": "text", + "content": ". Rotation along the z-axis is kept at zero for all cameras, ensuring that the up-axis for the " + }, + { + "bbox": [ + 104, + 641, + 506, + 740 + ], + "type": "inline_equation", + "content": "i^{\\mathrm{th}}" + }, + { + "bbox": [ + 104, + 641, + 506, + 740 + ], + "type": "text", + "content": " camera aligns with the " + }, + { + "bbox": [ + 104, + 641, + 506, + 740 + ], + "type": "inline_equation", + "content": "(O,u_i,z)" + }, + { + "bbox": [ + 104, + 641, + 506, + 740 + ], + "type": "text", + "content": " plane. During the animating phase, we utilize the perspective denoiser " + }, + { + "bbox": [ + 104, + 641, + 506, + 740 + ], + "type": "inline_equation", + "content": "\\Phi" + }, + { + "bbox": [ + 104, + 641, + 506, + 740 + ], + "type": "text", + "content": ", instantiated as the Animate-anything model (Dai et al., 2023), which fine-tunes the SVD model (Blattmann et al., 2023a). 
In the Spatial-Temporal Geometric Alignment stage of the lifting phase, the depth estimator " + }, + { + "bbox": [ + 104, + 641, + 506, + 740 + ], + "type": "inline_equation", + "content": "\\Theta" + }, + { + "bbox": [ + 104, + 641, + 506, + 740 + ], + "type": "text", + "content": " is implemented using MiDaS (Ranftl et al., 2021; Birkl et al., 2023). All experiments are executed on a single NVIDIA A100 GPU with 80 GB RAM." + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 130, + 80, + 216, + 125 + ], + "blocks": [ + { + "bbox": [ + 130, + 80, + 216, + 125 + ], + "lines": [ + { + "bbox": [ + 130, + 80, + 216, + 125 + ], + "spans": [ + { + "bbox": [ + 130, + 80, + 216, + 125 + ], + "type": "image", + "image_path": "9bbfe364a56ee0d51b6b52d873c5ba435d02036bc38d34b9dc3941aada792975.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 130, + 128, + 215, + 163 + ], + "blocks": [ + { + "bbox": [ + 130, + 128, + 215, + 163 + ], + "lines": [ + { + "bbox": [ + 130, + 128, + 215, + 163 + ], + "spans": [ + { + "bbox": [ + 130, + 128, + 215, + 163 + ], + "type": "image", + "image_path": "5bdb0ff078e36460979668047c021ff2820fe34fe56ca9a66dc19f055ea46524.jpg" + } + ] + } + ], + "index": 
2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 130, + 167, + 216, + 210 + ], + "blocks": [ + { + "bbox": [ + 130, + 167, + 216, + 210 + ], + "lines": [ + { + "bbox": [ + 130, + 167, + 216, + 210 + ], + "spans": [ + { + "bbox": [ + 130, + 167, + 216, + 210 + ], + "type": "image", + "image_path": "9529e77428cf1245612b5c6d22048fb2721be97d605a4a2f937ad1470feed93f.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 130, + 214, + 215, + 249 + ], + "blocks": [ + { + "bbox": [ + 130, + 214, + 215, + 249 + ], + "lines": [ + { + "bbox": [ + 130, + 214, + 215, + 249 + ], + "spans": [ + { + "bbox": [ + 130, + 214, + 215, + 249 + ], + "type": "image", + "image_path": "d3cf0e017258edd5a9ebaeb33e9730f71601ff61c807815e48cbc0d4c0d3275a.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 130, + 252, + 216, + 296 + ], + "blocks": [ + { + "bbox": [ + 130, + 252, + 216, + 296 + ], + "lines": [ + { + "bbox": [ + 130, + 252, + 216, + 296 + ], + "spans": [ + { + "bbox": [ + 130, + 252, + 216, + 296 + ], + "type": "image", + "image_path": "733010ef98e739eafad854240956f3ae69bf48abf057d816b1a79589bb0310a4.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 130, + 300, + 215, + 336 + ], + "blocks": [ + { + "bbox": [ + 130, + 300, + 215, + 336 + ], + "lines": [ + { + "bbox": [ + 130, + 300, + 215, + 336 + ], + "spans": [ + { + "bbox": [ + 130, + 300, + 215, + 336 + ], + "type": "image", + "image_path": "65d477d19e27f0d89dd7341f97fad2922039ca567f2dc343b00a69d086090796.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 348, + 504, + 403 + ], + "lines": [ + { + "bbox": [ + 104, + 348, + 504, + 403 + ], + "spans": [ + { + "bbox": [ + 104, + 
348, + 504, + 403 + ], + "type": "text", + "content": "Figure 4: Comparison between 4K4DGen and 3D-Cinemagraphy. We present the input static panorama (Pano RGB), the corresponding text prompts, and the rendered results from different views and at various timestamps. 4K4DGen (Ours) effectively generates 4D scenes that are both spatially and temporally consistent, while 3D-Cinemagraphy (3D-Cin.) suffers from ghosting artifacts in the middle frames." + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 219, + 80, + 346, + 166 + ], + "blocks": [ + { + "bbox": [ + 219, + 80, + 346, + 166 + ], + "lines": [ + { + "bbox": [ + 219, + 80, + 346, + 166 + ], + "spans": [ + { + "bbox": [ + 219, + 80, + 346, + 166 + ], + "type": "image", + "image_path": "7bf50256754cefa9a679885895ba4842fe8fc8ed0c0b766ba07c249653b356de.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 219, + 167, + 346, + 209 + ], + "blocks": [ + { + "bbox": [ + 219, + 167, + 346, + 209 + ], + "lines": [ + { + "bbox": [ + 219, + 167, + 346, + 209 + ], + "spans": [ + { + "bbox": [ + 219, + 167, + 346, + 209 + ], + "type": "image", + "image_path": "9224ee54cf2ac4a53e643ed6acbef5452c7c2a782e09e5d3a4ef888dc5e8bc8c.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 219, + 209, + 346, + 252 + ], + "blocks": [ + { + "bbox": [ + 219, + 209, + 346, + 252 + ], + "lines": [ + { + "bbox": [ + 219, + 209, + 346, + 252 + ], + "spans": [ + { + "bbox": [ + 219, + 209, + 346, + 252 + ], + "type": "image", + "image_path": "7a68077cb1f59942bb9053d7a6ccbc7614238244088d8e340e219ad84c0bdd47.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 219, + 253, + 346, + 296 + ], + "blocks": [ + { + "bbox": [ 
+ 219, + 253, + 346, + 296 + ], + "lines": [ + { + "bbox": [ + 219, + 253, + 346, + 296 + ], + "spans": [ + { + "bbox": [ + 219, + 253, + 346, + 296 + ], + "type": "image", + "image_path": "f86beb94fd93ee53d21b53820e039069e8d8acc6abe31ecd50e461984a9c5a53.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 219, + 296, + 346, + 338 + ], + "blocks": [ + { + "bbox": [ + 219, + 296, + 346, + 338 + ], + "lines": [ + { + "bbox": [ + 219, + 296, + 346, + 338 + ], + "spans": [ + { + "bbox": [ + 219, + 296, + 346, + 338 + ], + "type": "image", + "image_path": "2f5d7a8653807577c754ee91351b758a78bf3074822d0ca4a5ef31ef477febf9.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 349, + 80, + 478, + 166 + ], + "blocks": [ + { + "bbox": [ + 349, + 80, + 478, + 166 + ], + "lines": [ + { + "bbox": [ + 349, + 80, + 478, + 166 + ], + "spans": [ + { + "bbox": [ + 349, + 80, + 478, + 166 + ], + "type": "image", + "image_path": "b6b074d3d286c95483f65c76b3ee205e7f470269b32ba939f02b7ac3d57e0ae5.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 349, + 167, + 478, + 209 + ], + "blocks": [ + { + "bbox": [ + 349, + 167, + 478, + 209 + ], + "lines": [ + { + "bbox": [ + 349, + 167, + 478, + 209 + ], + "spans": [ + { + "bbox": [ + 349, + 167, + 478, + 209 + ], + "type": "image", + "image_path": "1e59b1830b5af0535c3f4fdfc9964ed23878d34c6306f524b63665d2d8cba6ce.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 349, + 209, + 478, + 252 + ], + "blocks": [ + { + "bbox": [ + 349, + 209, + 478, + 252 + ], + "lines": [ + { + "bbox": [ + 349, + 209, + 478, + 252 + ], + "spans": [ + { + "bbox": [ + 349, + 209, + 478, + 252 + ], + "type": "image", + 
"image_path": "e0f3acd7dcbe4dd1dbc99c5b893230bc52c5324341bd9c73e9a7697c06e1905f.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 349, + 253, + 478, + 296 + ], + "blocks": [ + { + "bbox": [ + 349, + 253, + 478, + 296 + ], + "lines": [ + { + "bbox": [ + 349, + 253, + 478, + 296 + ], + "spans": [ + { + "bbox": [ + 349, + 253, + 478, + 296 + ], + "type": "image", + "image_path": "7911f65fa5d11813bfce03cda57a5d62d977564536640efbe02c10c6f5fa8af4.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 349, + 296, + 478, + 338 + ], + "blocks": [ + { + "bbox": [ + 349, + 296, + 478, + 338 + ], + "lines": [ + { + "bbox": [ + 349, + 296, + 478, + 338 + ], + "spans": [ + { + "bbox": [ + 349, + 296, + 478, + 338 + ], + "type": "image", + "image_path": "6d678e0f149f1cb7c936868c52c32c9e2e738673a480cc610dc7ba40fd3eae9d.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 426, + 506, + 670 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 426, + 506, + 670 + ], + "spans": [ + { + "bbox": [ + 104, + 426, + 506, + 670 + ], + "type": "text", + "content": "Evaluation. As there is no ground truth 4D scene data available, we render videos at specific test camera poses from the synthesized 4D representation and employ non-reference video/image quality assessment methods for quantitative evaluation of our approach. For the test views, we select random cameras with " + }, + { + "bbox": [ + 104, + 426, + 506, + 670 + ], + "type": "inline_equation", + "content": "p = 0" + }, + { + "bbox": [ + 104, + 426, + 506, + 670 + ], + "type": "text", + "content": " as part of our testing camera set. We then introduce disturbances as described in Sec. 
3.4, applying a disturbance factor of " + }, + { + "bbox": [ + 104, + 426, + 506, + 670 + ], + "type": "inline_equation", + "content": "\\alpha = 0.05" + }, + { + "bbox": [ + 104, + 426, + 506, + 670 + ], + "type": "text", + "content": " at these selected views. Datasets. The task of generating 4D panoramas from static panoramas is new, and thus, no pre-existing datasets are available. In line with previous large-scale scene generation works (Zhou et al., 2024b; Yu et al., 2024), we evaluate our methodology using a dataset of 16 panoramas generated by text-to-panorama diffusion models (Yang et al., 2024). Bases. Current SDS-based methods (Wu et al., 2024a; Zhao et al., 2023) are limited to generating object-centered assets and do not support outward-facing scene generation. We compare our method with the optical-flow-based 3D dynamic image technique, 3D-Cinemagraphy (3D-Cin.) (Li et al., 2023b) (both the \"circle\" and \"zoom-in\" mode), by inputting the static panorama and projecting the output onto perspective images. Metrics. It is challenging to evaluate the visual quality without a ground-truth reference. We evaluate the rendered perspective videos regarding both the frame and video visual quality. For frame quality, We employ the LLM-based visual scorer Q-Align (Wu et al., 2023a) (IQ Scorer and IA Scorer) to evaluate the quality of individual frames. For video quality, we use the Q-Align video model (VQ) as the quality scorer. Additionally, we conduct user studies to further evaluate the results. In this paper, there are two types of user studies: (1) User Choice (UC), where participants are asked to compare and select the best video from candidates generated by different methods, and (2) User Agreement (UA), where participants assess whether specific properties are present in the videos generated by a particular approach." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 683, + 170, + 694 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 683, + 170, + 694 + ], + "spans": [ + { + "bbox": [ + 105, + 683, + 170, + 694 + ], + "type": "text", + "content": "4.2 RESULTS" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 104, + 698, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 698, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 698, + 506, + 733 + ], + "type": "text", + "content": "Quantitative Results. We show the quantitative comparison between 4K4DGen and 3D-Cinemography (Li et al., 2023a) in Tab. 1. 4K4DGen consistently achieves better performance in the LLM-based Q-Align metric regarding the image quality (IQ), image aesthetic (IA), and the" + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 108, + 158, + 501, + 198 + ], + "blocks": [ + { + "bbox": [ + 104, + 79, + 506, + 157 + ], + "lines": [ + { + "bbox": [ + 104, + 79, + 506, + 157 + ], + "spans": [ + { + "bbox": [ + 104, + 79, + 506, + 157 + ], + "type": "text", + "content": "Table 1: Comparison with 3D-Cinemagraphy. We compare our method with 3D-Cinemagraphy using rendered images from 4D representations. 
The IQ, IA, and VQ models represent the image quality scorer, image aesthetic scorer, and video quality scorer, respectively, within the Q-Align assessment framework. Our method, 4K4DGen, consistently achieves superior performance in both image and video quality across these metrics. Furthermore, 4K4DGen performs better in our user studies in terms of visual quality (Quality), motion amplitude (Amplitude), and the motion naturalness (Naturalness). Please refer to D.2 for further details." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 108, + 158, + 501, + 198 + ], + "lines": [ + { + "bbox": [ + 108, + 158, + 501, + 198 + ], + "spans": [ + { + "bbox": [ + 108, + 158, + 501, + 198 + ], + "type": "table", + "html": "
MethodQ-Align (IQ) ↑Q-Align (IA) ↑Q-Align (VQ) ↑Quality (UC) ↑Amplitude (UC) ↑Naturalness (UC) ↑
3D-Cinemagrophy (zoom-in)0.470.380.577%29.4%19.7%
3D-Cinemagrophy (circle)0.480.400.5812%32.0%21.1%
Ours (holistic pipeline)0.660.440.6281%38.6%59.2%
", + "image_path": "0134680683cc7d1e8c156f0bc29851755db5461c1c1f31d5c7b29878171f3203.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 206, + 504, + 231 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 206, + 504, + 231 + ], + "spans": [ + { + "bbox": [ + 104, + 206, + 504, + 231 + ], + "type": "text", + "content": "video quality (VQ). Besides, 4K4DGen is preferred by the users considering the video quality, motion amplitude, and motion naturalness." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 241, + 506, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 241, + 506, + 308 + ], + "spans": [ + { + "bbox": [ + 104, + 241, + 506, + 308 + ], + "type": "text", + "content": "Qualitative Results. We present a qualitative comparison between 4K4DGen and 3D-Cinemography (3D-Cin.) on the rendered images from 4D representations. Since the performance of 3D-Cin. is similar under the \"circle\" and \"zoom-in\" settings in Tab. 1, we use the \"circle\" setting to represent 3D-Cin. in Fig. 4. As shown in the figure, 4K4DGen produces high-quality perspective videos that maintain consistency across both time and views, whereas 3D-Cin. struggles with generating ghosting artifacts in the middle frames." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 313, + 217, + 324 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 313, + 217, + 324 + ], + "spans": [ + { + "bbox": [ + 105, + 313, + 217, + 324 + ], + "type": "text", + "content": "4.3 ABLATION STUDIES" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 333, + 504, + 389 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 333, + 504, + 389 + ], + "spans": [ + { + "bbox": [ + 104, + 333, + 504, + 389 + ], + "type": "text", + "content": "We conduct ablation studies for both the animating and lifting phases of our methodology. In the animating phase, we perform evaluation on 2D animated videos with different strategies, and highlight the importance of our spherical denoise strategy by replacing it with two basic animation techniques. In the lifting phase, we analyze the impact of excluding the Spatial-Temporal Geometry Alignment process and the temporal loss during the optimization of 4D representations." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 401, + 506, + 523 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 401, + 506, + 523 + ], + "spans": [ + { + "bbox": [ + 104, + 401, + 506, + 523 + ], + "type": "text", + "content": "Animating Phase. For analyzing the strategies in the animating phase, as shown in Tab. 2, we use Q-Align (visual quality scorer), view-consistency (user agreement), motion amplitude (user choice), and motion naturalness (user choice) to evaluate the 2D animated videos. For the details of the user studies, please refer to the Appendix D.2. To animate the panorama into a panoramic video, a straightforward approach is to apply animators directly to the entire panorama. However, we observed that this strategy often results in minor motion, as shown in Fig. 5 (b) and Tab. 2 (Animate Pano with small motion amplitude and less naturalness). 
This issue arises due to two main reasons: (1) animators are typically trained on perspective images with a narrow field of view (FoV), whereas pancreas have a " + }, + { + "bbox": [ + 104, + 401, + 506, + 523 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 104, + 401, + 506, + 523 + ], + "type": "text", + "content": " FoV with specific distortions under the equirectangular projection; (2) our panorama is high-resolution (4K), which exceeds the training distribution of most 2D animators and can easily cause out-of-memory issues, even with an 80GB VRAM graphics card. Thus the" + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 129, + 543, + 250, + 657 + ], + "blocks": [ + { + "bbox": [ + 129, + 543, + 250, + 657 + ], + "lines": [ + { + "bbox": [ + 129, + 543, + 250, + 657 + ], + "spans": [ + { + "bbox": [ + 129, + 543, + 250, + 657 + ], + "type": "image", + "image_path": "740355c1dbccf34946bfd78c27c1c7485e7f3c66f4582699524ba87c3e8c6f43.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 145, + 657, + 235, + 674 + ], + "lines": [ + { + "bbox": [ + 145, + 657, + 235, + 674 + ], + "spans": [ + { + "bbox": [ + 145, + 657, + 235, + 674 + ], + "type": "text", + "content": "(a) Our Spherical Denoiser (Consistent & Sufficient Motion)" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 684, + 506, + 730 + ], + "lines": [ + { + "bbox": [ + 104, + 684, + 506, + 730 + ], + "spans": [ + { + "bbox": [ + 104, + 684, + 506, + 730 + ], + "type": "text", + "content": "Figure 5: Comparison to Different Animators: Animators trained primarily on perspective images tend to produce limited motion when applied to panoramas, and the resolution may be limited. On the other hand, animating perspective images individually can lead to inconsistencies between overlapping views." 
+ } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 251, + 543, + 365, + 657 + ], + "blocks": [ + { + "bbox": [ + 251, + 543, + 365, + 657 + ], + "lines": [ + { + "bbox": [ + 251, + 543, + 365, + 657 + ], + "spans": [ + { + "bbox": [ + 251, + 543, + 365, + 657 + ], + "type": "image", + "image_path": "7ea9890efa6db2540a3e414d7dca6b51bd896b24147691d62ceaee9ccd8a3dbd.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 269, + 657, + 344, + 674 + ], + "lines": [ + { + "bbox": [ + 269, + 657, + 344, + 674 + ], + "spans": [ + { + "bbox": [ + 269, + 657, + 344, + 674 + ], + "type": "text", + "content": "(b)Animate Panorama (Small Motion)" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 369, + 543, + 482, + 657 + ], + "blocks": [ + { + "bbox": [ + 369, + 543, + 482, + 657 + ], + "lines": [ + { + "bbox": [ + 369, + 543, + 482, + 657 + ], + "spans": [ + { + "bbox": [ + 369, + 543, + 482, + 657 + ], + "type": "image", + "image_path": "22261480aff15d18565a5f9f7fd95345f60c3537e88ba4c7f3352958002f7588.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 384, + 658, + 466, + 674 + ], + "lines": [ + { + "bbox": [ + 384, + 658, + 466, + 674 + ], + "spans": [ + { + "bbox": [ + 384, + 658, + 466, + 674 + ], + "type": "text", + "content": "(c)Animate Pers.Image (Inconsistent Motion)" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": 
[ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 126, + 43, + 375, + 201 + ], + "blocks": [ + { + "bbox": [ + 126, + 43, + 375, + 201 + ], + "lines": [ + { + "bbox": [ + 126, + 43, + 375, + 201 + ], + "spans": [ + { + "bbox": [ + 126, + 43, + 375, + 201 + ], + "type": "image", + "image_path": "98c14ff0d0a1b71b27a75498da5cd3fc9325cb0fe1adcab63c1830debb93910d.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 210, + 504, + 255 + ], + "lines": [ + { + "bbox": [ + 104, + 210, + 504, + 255 + ], + "spans": [ + { + "bbox": [ + 104, + 210, + 504, + 255 + ], + "type": "text", + "content": "Figure 6: Ablating Lifting Phase: (Left) The w/o " + }, + { + "bbox": [ + 104, + 210, + 504, + 255 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{Temp}}" + }, + { + "bbox": [ + 104, + 210, + 504, + 255 + ], + "type": "text", + "content": " variant (column d) produces renderings with flashing stripes. Zoomed-in details of the flashing stripe region are highlighted in (e). (Right) Without spatial-temporal geometry alignment, the geometry in the smoke area of the volcano for the w/o STA variant (column g) appears less consistent compared to the full model (column f)." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 376, + 52, + 484, + 200 + ], + "blocks": [ + { + "bbox": [ + 376, + 52, + 484, + 200 + ], + "lines": [ + { + "bbox": [ + 376, + 52, + 484, + 200 + ], + "spans": [ + { + "bbox": [ + 376, + 52, + 484, + 200 + ], + "type": "image", + "image_path": "07d9a5b2b3b23cdd581c29cef7a90327f87d96a8916eebefbfc0b34196092d36.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 257, + 506, + 356 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 257, + 506, + 356 + ], + "spans": [ + { + "bbox": [ + 104, + 257, + 506, + 356 + ], + "type": "text", + "content": "panoramas have to be down-sampled to a lower resolution (2K), causing a loss of details. To this end, we seek to animate on perspective views. Applying the animator on perspective views offers benefits such as reduced distortion and inputs that suit the domain of the animator, allowing for smooth animation of high-resolution panoramas. However, animating perspective images separately can introduce inconsistencies between overlapping perspective views, as illustrated in Fig. 5 (c) and Tab. 2 (Animate Pers.). To resolve this challenge, we propose simultaneously denoising all perspective views and fusing them at each denoising step, in the spherical latent space, which capitalizes on the benefits of animating perspective views while ensuring cross-view consistency. The results are displayed in Fig. 5 (a) and Tab. 2." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 357, + 504, + 379 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 357, + 504, + 379 + ], + "spans": [ + { + "bbox": [ + 104, + 357, + 504, + 379 + ], + "type": "text", + "content": "Lifting Phase. 
We conduct ablation studies on the Spatial-Temporal Geometry Alignment (STA) module and the temporal loss during the lifting phase, as shown in Fig. 6." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 384, + 504, + 472 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 384, + 504, + 472 + ], + "spans": [ + { + "bbox": [ + 104, + 384, + 504, + 472 + ], + "type": "text", + "content": "Table 2: Different Animation Strategies in the Animating Phase. We analyze the efficacy of animation strategies by evaluating the animated 2D videos in different ways. Animating the entire panorama results in worse motion and reduced resolution (first row), as indicated by the Amplitude and Naturalness metric. Conversely, animating from perspective views leads to inconsistencies across different views (second row), as supported by the Q-Align metric and the \"View-consistency (UA)\" study. 4K4DGen capitalizes the generative ability from perspective animating priors while enabling cross-view consistent motion between different perspectives, which achieves the best motion naturalness and amplitudes among all the settings (third row)." + } + ] + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 109, + 482, + 501, + 534 + ], + "blocks": [ + { + "bbox": [ + 109, + 482, + 501, + 534 + ], + "lines": [ + { + "bbox": [ + 109, + 482, + 501, + 534 + ], + "spans": [ + { + "bbox": [ + 109, + 482, + 501, + 534 + ], + "type": "table", + "html": "
AnimatorMax Pano. Res.Q-Align (VQ) ↑View-consistency (UA)↑Amplitude (UC) ↑Naturalness (UC) ↑
Animate Pano.2048 × 10240.82-26.8%17.8%
Animate Pers.4096 × 20480.6433%32.4%39.3%
Ours (Animating Phase)4096 × 20480.8570%40.8%42.9%
", + "image_path": "3ea95c32fd690ebf78157b10e764042f3547c8a43082b673864afb8e56d6b853.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 558, + 195, + 569 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 558, + 195, + 569 + ], + "spans": [ + { + "bbox": [ + 105, + 558, + 195, + 569 + ], + "type": "text", + "content": "5 CONCLUSION" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 571, + 506, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 571, + 506, + 639 + ], + "spans": [ + { + "bbox": [ + 104, + 571, + 506, + 639 + ], + "type": "text", + "content": "Conclusion. We have proposed a novel framework 4K4DGen, allowing users to create high-quality 4K panoramic 4D content using text prompts, which delivers immersive virtual touring experiences. To achieve panorama-to-4D even without high-quality 4D training data, we integrate generic 2D prior models into the panoramic domain. Our approach involves a two-stage pipeline: initially generating panoramic videos using a Panoramic Denoiser, followed by 4D elevation through a Spatial-Temporal Geometry Alignment mechanism to ensure spatial coherence and temporal continuity." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 643, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 643, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 643, + 504, + 733 + ], + "type": "text", + "content": "Limitation. First, the quality of temporal animation in the generated 4D environment mainly relies on the ability of the pre-trained I2V model. Future improvements could include the integration of a more advanced 2D animatior. 
Second, since our method ensures spatial and temporal continuity during the 4D elevation phase, it is currently unable to synthesize significant changes in the environment, such as the appearance of glowing fireflies or changing weather conditions. Third, the high-resolution and time-dependent representation of the generated 4D environment necessitates substantial storage capacity, which could be optimized in future work using techniques such as model distillation and pruning." + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 107, + 81, + 176, + 93 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 176, + 93 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 176, + 93 + ], + "type": "text", + "content": "REFERENCES" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 100, + 507, + 732 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 105, + 100, + 507, + 134 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 100, + 507, + 134 + ], + "spans": [ + { + "bbox": [ + 105, + 100, + 507, + 134 + ], + "type": "text", + "content": "Jie An, Songyang Zhang, Harry Yang, Sonal Gupta, Jia-Bin Huang, Jiebo Luo, and Xi Yin. 
Latent-shift: Latent diffusion with temporal shift for efficient text-to-video generation. arXiv preprint arXiv:2304.08477, 2023." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 140, + 507, + 187 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 140, + 507, + 187 + ], + "spans": [ + { + "bbox": [ + 105, + 140, + 507, + 187 + ], + "type": "text", + "content": "Sherwin Bahmani, Ivan Skorokhodov, Victor Rong, Gordon Wetzstein, Leonidas Guibas, Peter Wonka, Sergey Tulyakov, Jeong Joon Park, Andrea Tagliasacchi, and David B Lindell. 4d-fy: Text-to-4d generation using hybrid score distillation sampling. Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 194, + 505, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 194, + 505, + 217 + ], + "spans": [ + { + "bbox": [ + 105, + 194, + 505, + 217 + ], + "type": "text", + "content": "Jiayang Bai, Letian Huang, Jie Guo, Wen Gong, Yuanqi Li, and Yanwen Guo. 360-gs: Layout-guided panoramic gaussian splatting for indoor roaming. arXiv preprint arXiv:2402.00763, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 224, + 505, + 247 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 224, + 505, + 247 + ], + "spans": [ + { + "bbox": [ + 105, + 224, + 505, + 247 + ], + "type": "text", + "content": "Omer Bar-Tal, Lior Yariv, Yaron Lipman, and Tali Dekel. Multidiffusion: Fusing diffusion paths for controlled image generation. International conference on machine learning, 2023." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 255, + 505, + 278 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 255, + 505, + 278 + ], + "spans": [ + { + "bbox": [ + 105, + 255, + 505, + 278 + ], + "type": "text", + "content": "Reiner Birkl, Diana Wofk, and Matthias Müller. 
Midas v3.1 - a model zoo for robust monocular relative depth estimation. arXiv preprint arXiv:2307.14460, 2023." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 285, + 505, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 285, + 505, + 319 + ], + "spans": [ + { + "bbox": [ + 105, + 285, + 505, + 319 + ], + "type": "text", + "content": "Andreas Blattmann, Tim Dockhorn, Sumith Kulal, Daniel Mendelevitch, Maciej Kilian, Dominik Lorenz, Yam Levi, Zion English, Vikram Voleti, Adam Letts, et al. Stable video diffusion: Scaling latent video diffusion models to large datasets. arXiv preprint arXiv:2311.15127, 2023a." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 327, + 507, + 371 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 327, + 507, + 371 + ], + "spans": [ + { + "bbox": [ + 105, + 327, + 507, + 371 + ], + "type": "text", + "content": "Andreas Blattmann, Robin Rombach, Huan Ling, Tim Dockhorn, Seung Wook Kim, Sanja Fidler, and Karsten Kreis. Align your latents: High-resolution video synthesis with latent diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 22563-22575, 2023b." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 379, + 505, + 403 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 379, + 505, + 403 + ], + "spans": [ + { + "bbox": [ + 105, + 379, + 505, + 403 + ], + "type": "text", + "content": "Zilong Chen, Feng Wang, and Huaping Liu. Text-to-3d using gaussian splatting. Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2023." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 410, + 506, + 434 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 410, + 506, + 434 + ], + "spans": [ + { + "bbox": [ + 105, + 410, + 506, + 434 + ], + "type": "text", + "content": "Jaeyoung Chung, Suyoung Lee, Hyeongjin Nam, Jaerin Lee, and Kyoung Mu Lee. Luciddreamer: Domain-free generation of 3d gaussian splatting scenes. arXiv preprint arXiv:2311.13384, 2023." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 441, + 506, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 441, + 506, + 464 + ], + "spans": [ + { + "bbox": [ + 105, + 441, + 506, + 464 + ], + "type": "text", + "content": "Zuozhuo Dai, Zhenghao Zhang, Yao Yao, Bingxue Qiu, Siyu Zhu, Long Qin, and Weizhi Wang. *Animateanything: Fine-grained open domain image animation with motion guidance*, 2023." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 471, + 505, + 494 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 471, + 505, + 494 + ], + "spans": [ + { + "bbox": [ + 105, + 471, + 505, + 494 + ], + "type": "text", + "content": "Prafulla Dhariwal and Alexander Nichol. Diffusion models beat gans on image synthesis. Advances in neural information processing systems, 34:8780-8794, 2021." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 502, + 505, + 525 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 502, + 505, + 525 + ], + "spans": [ + { + "bbox": [ + 105, + 502, + 505, + 525 + ], + "type": "text", + "content": "Mengyang Feng, Jinlin Liu, Miaomiao Cui, and Xuansong Xie. Diffusion360: Seamless 360 degree panoramic image generation based on diffusion models. arXiv preprint arXiv:2311.13141, 2023." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 532, + 507, + 577 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 532, + 507, + 577 + ], + "spans": [ + { + "bbox": [ + 105, + 532, + 507, + 577 + ], + "type": "text", + "content": "Songwei Ge, Seungjun Nah, Guilin Liu, Tyler Poon, Andrew Tao, Bryan Catanzaro, David Jacobs, Jia-Bin Huang, Ming-Yu Liu, and Yogesh Balaji. Preserve your own correlation: A noise prior for video diffusion models. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 22930-22941, 2023." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 584, + 505, + 618 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 584, + 505, + 618 + ], + "spans": [ + { + "bbox": [ + 105, + 584, + 505, + 618 + ], + "type": "text", + "content": "Pulkit Gera, Mohammad Reza Karimi Dastjerdi, Charles Renaud, PJ Narayanan, and Jean-François Lalonde. Casual indoor hdr radiance capture from omnidirectional images. British Machine Vision Conference, 2022." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 625, + 505, + 660 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 625, + 505, + 660 + ], + "spans": [ + { + "bbox": [ + 105, + 625, + 505, + 660 + ], + "type": "text", + "content": "Kai Gu, Thomas Maugey, Sebastian Knorr, and Christine Guillemot. Omni-nerf: neural radiance field from 360 image captures. In 2022 IEEE International Conference on Multimedia and Expo (ICME), pp. 1-6. IEEE, 2022." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 668, + 505, + 701 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 668, + 505, + 701 + ], + "spans": [ + { + "bbox": [ + 105, + 668, + 505, + 701 + ], + "type": "text", + "content": "Yuwei Guo, Ceyuan Yang, Anyi Rao, Yaohui Wang, Yu Qiao, Dahua Lin, and Bo Dai. 
Animatediff: animate your personalized text-to-image diffusion models without specific tuning. International Conference on Learning Representations, 2024." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 709, + 505, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 709, + 505, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 709, + 505, + 732 + ], + "type": "text", + "content": "Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. Advances in neural information processing systems, 33:6840-6851, 2020." + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 761 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 506, + 732 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 105, + 81, + 505, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 505, + 117 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 505, + 117 + ], + "type": "text", + "content": "Jonathan Ho, William Chan, Chitwan Sahara, Jay Whang, Ruiqi Gao, Alexey Gritsenko, Diederik P Kingma, Ben Poole, Mohammad Norouzi, David J Fleet, et al. Imagen video: High definition video generation with diffusion models. arXiv preprint arXiv:2210.02303, 2022." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 123, + 506, + 168 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 123, + 506, + 168 + ], + "spans": [ + { + "bbox": [ + 105, + 123, + 506, + 168 + ], + "type": "text", + "content": "Lukas Hollein, Aljaž Božić, Norman Müller, David Novotny, Hung-Yu Tseng, Christian Richardt, Michael Zollhöfer, and Matthias Nießner. Viewdiff: 3d-consistent image generation with text-to-image models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 5043-5052, 2024." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 176, + 505, + 210 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 176, + 505, + 210 + ], + "spans": [ + { + "bbox": [ + 105, + 176, + 505, + 210 + ], + "type": "text", + "content": "Huajian Huang, Yingshu Chen, Tianjian Zhang, and Sai-Kit Yeung. 360roam: Real-time indoor roaming using geometry-aware " + }, + { + "bbox": [ + 105, + 176, + 505, + 210 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 105, + 176, + 505, + 210 + ], + "type": "text", + "content": " radiance fields. SIGGRAPH Asia Conference Proceedings, 2022." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 217, + 505, + 253 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 217, + 505, + 253 + ], + "spans": [ + { + "bbox": [ + 105, + 217, + 505, + 253 + ], + "type": "text", + "content": "Jiahui Huang, Zan Gojcic, Matan Atzmon, Or Litany, Sanja Fidler, and Francis Williams. Neural kernel surface reconstruction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 4369-4379, 2023." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 259, + 504, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 259, + 504, + 285 + ], + "spans": [ + { + "bbox": [ + 105, + 259, + 504, + 285 + ], + "type": "text", + "content": "Álvaro Barbero Jiménez. Mixture of diffusers for scene composition and high resolution image generation. arXiv preprint arXiv:2302.02412, 2023." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 292, + 504, + 316 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 292, + 504, + 316 + ], + "spans": [ + { + "bbox": [ + 105, + 292, + 504, + 316 + ], + "type": "text", + "content": "Bernhard Kerbl, Georgios Kopanas, Thomas Leimkuhler, and George Drettakis. 3d gaussian splatting for real-time radiance field rendering. ACM Transactions on Graphics, 42(4):1-14, 2023." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 322, + 505, + 357 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 322, + 505, + 357 + ], + "spans": [ + { + "bbox": [ + 105, + 322, + 505, + 357 + ], + "type": "text", + "content": "Justin Kerr, Chung Min Kim, Ken Goldberg, Angjoo Kanazawa, and Matthew Tancik. Leref: Language embedded radiance fields. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 19729-19739, 2023." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 364, + 506, + 399 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 364, + 506, + 399 + ], + "spans": [ + { + "bbox": [ + 105, + 364, + 506, + 399 + ], + "type": "text", + "content": "Yuseung Lee, Kunho Kim, Hyunjin Kim, and Minhyuk Sung. Syncdiffusion: Coherent montage via synchronized joint diffusions. Advances in Neural Information Processing Systems, 36:50648-50660, 2023." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 405, + 506, + 441 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 405, + 506, + 441 + ], + "spans": [ + { + "bbox": [ + 105, + 405, + 506, + 441 + ], + "type": "text", + "content": "Xingyi Li, Zhiguo Cao, Huiqiang Sun, Jianming Zhang, Ke Xian, and Guosheng Lin. 3d cinematography from a single image. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 4595-4605, 2023a." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 447, + 506, + 482 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 447, + 506, + 482 + ], + "spans": [ + { + "bbox": [ + 105, + 447, + 506, + 482 + ], + "type": "text", + "content": "Xingyi Li, Zhiguo Cao, Huiqiang Sun, Jianming Zhang, Ke Xian, and Guosheng Lin. 3d cinematography from a single image. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 4595-4605, June 2023b." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 489, + 506, + 524 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 489, + 506, + 524 + ], + "spans": [ + { + "bbox": [ + 105, + 489, + 506, + 524 + ], + "type": "text", + "content": "Chenguo Lin, Yuchen Lin, Panwang Pan, Xuanyang Zhang, and Yadong Mu. Instructlayout: Instruction-driven 2d and 3d layout synthesis with semantic graph prior, 2024. URL https://arxiv.org/abs/2407.07580." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 531, + 506, + 566 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 531, + 506, + 566 + ], + "spans": [ + { + "bbox": [ + 105, + 531, + 506, + 566 + ], + "type": "text", + "content": "Chenguo Lin, Panwang Pan, Bangbang Yang, Zeming Li, and Yadong Mu. Diffsplat: Repurposing image diffusion models for scalable gaussian splat generation, 2025. URL https://arxiv.org/abs/2501.16764." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 572, + 506, + 608 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 572, + 506, + 608 + ], + "spans": [ + { + "bbox": [ + 105, + 572, + 506, + 608 + ], + "type": "text", + "content": "Huan Ling, Seung Wook Kim, Antonio Torralba, Sanja Fidler, and Karsten Kreis. Align your gaussians: Text-to-4d with dynamic 3d gaussians and composed diffusion models. Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2023." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 614, + 506, + 649 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 614, + 506, + 649 + ], + "spans": [ + { + "bbox": [ + 105, + 614, + 506, + 649 + ], + "type": "text", + "content": "Yuan Liu, Cheng Lin, Zijiao Zeng, Xiaoxiao Long, Lingjie Liu, Taku Komura, and Wenping Wang. Syncdreamer: Generating multiview-consistent images from a single-view image. International Conference on Learning Representations, 2023." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 656, + 506, + 691 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 656, + 506, + 691 + ], + "spans": [ + { + "bbox": [ + 105, + 656, + 506, + 691 + ], + "type": "text", + "content": "Andreas Lugmayr, Martin Danelljan, Andres Romero, Fisher Yu, Radu Timofte, and Luc Van Gool. Repaint: Inpainting using denoising diffusion probabilistic models. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 11461-11471, 2022." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 697, + 505, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 697, + 505, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 697, + 505, + 732 + ], + "type": "text", + "content": "Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. 
Nerf: Representing scenes as neural radiance fields for view synthesis. In European Conference on Computer Vision, 2020." + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 507, + 732 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 105, + 81, + 507, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 507, + 117 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 507, + 117 + ], + "type": "text", + "content": "Chong Mou, Xintao Wang, Liangbin Xie, Yanze Wu, Jian Zhang, Zhongang Qi, and Ying Shan. T2i-adapter: Learning adapters to dig out more controllable ability for text-to-image diffusion models. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 38, pp. 4296–4304, 2024." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 122, + 505, + 157 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 122, + 505, + 157 + ], + "spans": [ + { + "bbox": [ + 105, + 122, + 505, + 157 + ], + "type": "text", + "content": "Alex Nichol, Prafulla Dhariwal, Aditya Ramesh, Pranav Shyam, Pamela Mishkin, Bob McGrew, Ilya Sutskever, and Mark Chen. 
Glide: Towards photorealistic image generation and editing with text-guided diffusion models. International conference on machine learning, 2021." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 163, + 505, + 198 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 163, + 505, + 198 + ], + "spans": [ + { + "bbox": [ + 105, + 163, + 505, + 198 + ], + "type": "text", + "content": "Maxime Oquab, Timothee Darcet, Theo Moutakanni, Huy Vo, Marc Szafraniec, Vasil Khalidov, Pierre Fernandez, Daniel Haziza, Francisco Massa, Alaaeldin El-Nouby, et al. Dinov2: Learning robust visual features without supervision. Transactions on Machine Learning Research, 2023." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 203, + 504, + 227 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 203, + 504, + 227 + ], + "spans": [ + { + "bbox": [ + 105, + 203, + 504, + 227 + ], + "type": "text", + "content": "Hao Ouyang, Kathryn Heal, Stephen Lombardi, and Tiancheng Sun. Text2immersion: Generative immersive scene with 3d gaussians. arXiv preprint arXiv:2312.09242, 2023." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 233, + 504, + 256 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 233, + 504, + 256 + ], + "spans": [ + { + "bbox": [ + 105, + 233, + 504, + 256 + ], + "type": "text", + "content": "Zijie Pan, Zeyu Yang, Xiatian Zhu, and Li Zhang. Fast dynamic 3d object generation from a single-view video. arXiv preprint arXiv:2401.08742, 2024." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 262, + 505, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 262, + 505, + 297 + ], + "spans": [ + { + "bbox": [ + 105, + 262, + 505, + 297 + ], + "type": "text", + "content": "Songyou Peng, Michael Niemeyer, Lars Mescheder, Marc Pollefeys, and Andreas Geiger. Convolutional occupancy networks. 
In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part III 16, pp. 523-540. Springer, 2020." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 303, + 504, + 338 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 303, + 504, + 338 + ], + "spans": [ + { + "bbox": [ + 105, + 303, + 504, + 338 + ], + "type": "text", + "content": "Dustin Podell, Zion English, Kyle Lacey, Andreas Blattmann, Tim Dockhorn, Jonas Müller, Joe Penna, and Robin Rombach. Sdxl: Improving latent diffusion models for high-resolution image synthesis. International Conference on Learning Representations, 2024." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 343, + 505, + 378 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 343, + 505, + 378 + ], + "spans": [ + { + "bbox": [ + 105, + 343, + 505, + 378 + ], + "type": "text", + "content": "Minghan Qin, Wanhua Li, Jiawei Zhou, Haoqian Wang, and Hanspeter Pfister. Langsplat: 3d language gaussian splatting. Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 384, + 505, + 418 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 384, + 505, + 418 + ], + "spans": [ + { + "bbox": [ + 105, + 384, + 505, + 418 + ], + "type": "text", + "content": "Fabio Quattrini, Vittorio Pippi, Silvia Cascianelli, and Rita Cucchiara. Merging and splitting diffusion paths for semantically coherent panoramicas. In European Conference on Computer Vision, pp. 234-251. Springer, 2025." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 425, + 505, + 449 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 425, + 505, + 449 + ], + "spans": [ + { + "bbox": [ + 105, + 425, + 505, + 449 + ], + "type": "text", + "content": "Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. 
Hierarchical text-conditional image generation with clip latents. arXiv preprint arXiv:2204.06125, 1(2):3, 2022." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 454, + 505, + 487 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 454, + 505, + 487 + ], + "spans": [ + { + "bbox": [ + 105, + 454, + 505, + 487 + ], + "type": "text", + "content": "René Ranftl, Alexey Bochkovskiy, and Vladlen Koltun. Vision transformers for dense prediction. In Proceedings of the IEEE/CVF international conference on computer vision, pp. 12179-12188, 2021." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 495, + 505, + 529 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 495, + 505, + 529 + ], + "spans": [ + { + "bbox": [ + 105, + 495, + 505, + 529 + ], + "type": "text", + "content": "Jiawei Ren, Liang Pan, Jiaxiang Tang, Chi Zhang, Ang Cao, Gang Zeng, and Ziwei Liu. Dreamgaussian4d: Generative 4d gaussian splattering. Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2024." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 536, + 505, + 570 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 536, + 505, + 570 + ], + "spans": [ + { + "bbox": [ + 105, + 536, + 505, + 570 + ], + "type": "text", + "content": "Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 10684-10695, 2022." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 576, + 505, + 621 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 576, + 505, + 621 + ], + "spans": [ + { + "bbox": [ + 105, + 576, + 505, + 621 + ], + "type": "text", + "content": "Chitwan Sahara, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily L Denton, Kamyar Ghasemipour, Raphael Gontijo Lopes, Burcu Karagol Ayan, Tim Salimans, et al. Photorealistic text-to-image diffusion models with deep language understanding. Advances in neural information processing systems, 35:36479-36494, 2022." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 628, + 505, + 651 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 628, + 505, + 651 + ], + "spans": [ + { + "bbox": [ + 105, + 628, + 505, + 651 + ], + "type": "text", + "content": "Jaidev Shriram, Alex Trevithick, Lingjie Liu, and Ravi Ramamoorthi. Realmdreamer: Text-driven 3d scene generation with inpainting and depth diffusion. arXiv preprint arXiv:2404.07199, 2024." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 657, + 505, + 692 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 657, + 505, + 692 + ], + "spans": [ + { + "bbox": [ + 105, + 657, + 505, + 692 + ], + "type": "text", + "content": "Uriel Singer, Adam Polyak, Thomas Hayes, Xi Yin, Jie An, Songyang Zhang, Qiyuan Hu, Harry Yang, Oron Ashual, Oran Gafni, et al. Make-a-video: Text-to-video generation without text-video data. International Conference on Learning Representations, 2023." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 698, + 505, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 698, + 505, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 698, + 505, + 732 + ], + "type": "text", + "content": "Liangchen Song, Liangliang Cao, Hongyu Xu, Kai Kang, Feng Tang, Junsong Yuan, and Zhao Yang. 
Roomdreamer: Text-driven 3d indoor scene synthesis with coherent geometry and texture. In Proceedings of the 31st ACM International Conference on Multimedia, pp. 6898-6906, 2023." + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 506, + 732 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 107, + 81, + 506, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 506, + 116 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 506, + 116 + ], + "type": "text", + "content": "Wenqiang Sun, Shuo Chen, Fangfu Liu, Zilong Chen, Yueqi Duan, Jun Zhang, and Yikai Wang. Dimensionx: Create any 3d and 4d scenes from a single image with controllable video diffusion. arXiv preprint arXiv:2411.04928, 2024." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 124, + 506, + 158 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 124, + 506, + 158 + ], + "spans": [ + { + "bbox": [ + 105, + 124, + 506, + 158 + ], + "type": "text", + "content": "Jiaxiang Tang, Jiawei Ren, Hang Zhou, Ziwei Liu, and Gang Zeng. Dreamgaussian: Generative gaussian splatting for efficient 3d content creation. International Conference on Learning Representations, 2024." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 166, + 506, + 201 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 166, + 506, + 201 + ], + "spans": [ + { + "bbox": [ + 105, + 166, + 506, + 201 + ], + "type": "text", + "content": "Shitao Tang, Fuyang Zhang, Jiacheng Chen, Peng Wang, and Yasutaka Furukawa. Mvdiffusion: Enabling holistic multi-view image generation with correspondence-aware diffusion. Advances in Neural Information Processing Systems, 2023." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 209, + 504, + 244 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 209, + 504, + 244 + ], + "spans": [ + { + "bbox": [ + 105, + 209, + 504, + 244 + ], + "type": "text", + "content": "Thomas Unterthiner, Sjoerd Van Steenkiste, Karol Kurach, Raphael Marinier, Marcin Michalski, and Sylvain Gelly. Towards accurate generative models of video: A new metric & challenges. arXiv preprint arXiv:1812.01717, 2018." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 251, + 504, + 286 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 251, + 504, + 286 + ], + "spans": [ + { + "bbox": [ + 105, + 251, + 504, + 286 + ], + "type": "text", + "content": "Thomas Unterthiner, Sjoerd van Steenkiste, Karol Kurach, Raphaël Marinier, Marcin Michalski, and Sylvain Gelly. Fvd: A new metric for video generation. International Conference on Learning Representations(ICLR), 2019." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 293, + 504, + 318 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 293, + 504, + 318 + ], + "spans": [ + { + "bbox": [ + 105, + 293, + 504, + 318 + ], + "type": "text", + "content": "Alexander Vilesov, Pradyumna Chari, and Achuta Kadambi. Cg3d: Compositional generation for text-to-3d via gaussian splatting. arXiv preprint arXiv:2311.17907, 2023." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 325, + 506, + 360 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 325, + 506, + 360 + ], + "spans": [ + { + "bbox": [ + 105, + 325, + 506, + 360 + ], + "type": "text", + "content": "Guangcong Wang, Peng Wang, Zhaoxi Chen, Wenping Wang, Chen Change Loy, and Ziwei Liu. Perf: Panoramic neural radiance field from a single panorama. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2024a." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 368, + 506, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 368, + 506, + 392 + ], + "spans": [ + { + "bbox": [ + 105, + 368, + 506, + 392 + ], + "type": "text", + "content": "Jiuniu Wang, Hangjie Yuan, Dayou Chen, Yingya Zhang, Xiang Wang, and Shiwei Zhang. Modelscope text-to-video technical report. arXiv preprint arXiv:2308.06571, 2023a." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 399, + 504, + 434 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 399, + 504, + 434 + ], + "spans": [ + { + "bbox": [ + 105, + 399, + 504, + 434 + ], + "type": "text", + "content": "Qian Wang, Weiqi Li, Chong Mou, Xinhua Cheng, and Jian Zhang. 360dvd: Controllable panorama video generation with 360-degree video diffusion model. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 6913-6923, 2024b." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 441, + 506, + 487 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 441, + 506, + 487 + ], + "spans": [ + { + "bbox": [ + 105, + 441, + 506, + 487 + ], + "type": "text", + "content": "Zhen Wang, Shijie Zhou, Jeong Joon Park, Despoina Paschalidou, Suya You, Gordon Wetzstein, Leonidas Guibas, and Achuta Kadambi. Alto: Alternating latent topologies for implicit 3d reconstruction. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 259-270, 2023b." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 495, + 506, + 540 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 495, + 506, + 540 + ], + "spans": [ + { + "bbox": [ + 105, + 495, + 506, + 540 + ], + "type": "text", + "content": "Guanjun Wu, Taoran Yi, Jiemin Fang, Lingxi Xie, Xiaopeng Zhang, Wei Wei, Wenyu Liu, Qi Tian, and Xinggang Wang. 4d gaussian splatting for real-time dynamic scene rendering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 20310-20320, 2024a." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 548, + 506, + 594 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 548, + 506, + 594 + ], + "spans": [ + { + "bbox": [ + 105, + 548, + 506, + 594 + ], + "type": "text", + "content": "Haoning Wu, Zicheng Zhang, Weixia Zhang, Chaofeng Chen, Chunyi Li, Liang Liao, Annan Wang, Erli Zhang, Wenxiu Sun, Qiong Yan, Xiongkuo Min, Guangtai Zhai, and Weisi Lin. Q-align: Teaching Imms for visual scoring via discrete text-defined levels. arXiv preprint arXiv:2312.17090, 2023a." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 601, + 506, + 647 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 601, + 506, + 647 + ], + "spans": [ + { + "bbox": [ + 105, + 601, + 506, + 647 + ], + "type": "text", + "content": "Jay Zhangjie Wu, Yixiao Ge, Xintao Wang, Stan Weixian Lei, Yuchao Gu, Yufei Shi, Wynne Hsu, Ying Shan, Xiaohu Qie, and Mike Zheng Shou. Tune-a-video: One-shot tuning of image diffusion models for text-to-video generation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 7623-7633, 2023b." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 655, + 504, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 655, + 504, + 689 + ], + "spans": [ + { + "bbox": [ + 105, + 655, + 504, + 689 + ], + "type": "text", + "content": "Ruiqi Wu, Liangyu Chen, Tong Yang, Chunle Guo, Chongyi Li, and Xiangyu Zhang. Lamp: Learn a motion pattern for few-shot-based video generation. Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2024b." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 698, + 504, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 698, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 698, + 504, + 732 + ], + "type": "text", + "content": "Zhen Xu, Sida Peng, Haotong Lin, Guangzhao He, Jiaming Sun, Yujun Shen, Hujun Bao, and Xiaowei Zhou. 4k4d: Real-time 4d view synthesis at 4k resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2024." 
+ } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 506, + 462 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 107, + 81, + 506, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 506, + 117 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 506, + 117 + ], + "type": "text", + "content": "Bangbang Yang, Wenqi Dong, Lin Ma, Wenbo Hu, Xiao Liu, Zhaopeng Cui, and Yuewen Ma. Dreamspace: Dreaming your room space with text-driven panoramic texture propagation. In 2024 IEEE Conference Virtual Reality and 3D User Interfaces (VR), pp. 650-660. IEEE, 2024." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 122, + 505, + 167 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 122, + 505, + 167 + ], + "spans": [ + { + "bbox": [ + 105, + 122, + 505, + 167 + ], + "type": "text", + "content": "Taoran Yi, Jiemin Fang, Guanjun Wu, Lingxi Xie, Xiaopeng Zhang, Wenyu Liu, Qi Tian, and Xinggang Wang. Gaussian dreamer: Fast generation from text to 3d gaussians by bridging 2d and 3d diffusion models. Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2024." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 175, + 504, + 198 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 175, + 504, + 198 + ], + "spans": [ + { + "bbox": [ + 107, + 175, + 504, + 198 + ], + "type": "text", + "content": "Yuyang Yin, Dejia Xu, Zhangyang Wang, Yao Zhao, and Yunchao Wei. 4dgen: Grounded 4d content generation with spatial-temporal consistency. arXiv preprint arXiv:2312.17225, 2023." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 204, + 505, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 204, + 505, + 228 + ], + "spans": [ + { + "bbox": [ + 107, + 204, + 505, + 228 + ], + "type": "text", + "content": "Hong-Xing Yu, Haoyi Duan, Charles Herrmann, William T Freeman, and Jiajun Wu. Wonderworld: Interactive 3d scene generation from a single image. arXiv preprint arXiv:2406.09394, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 234, + 505, + 269 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 234, + 505, + 269 + ], + "spans": [ + { + "bbox": [ + 105, + 234, + 505, + 269 + ], + "type": "text", + "content": "Lvmin Zhang, Anyi Rao, and Maneesh Agrawala. Adding conditional control to text-to-image diffusion models. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 3836-3847, 2023." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 275, + 505, + 298 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 275, + 505, + 298 + ], + "spans": [ + { + "bbox": [ + 107, + 275, + 505, + 298 + ], + "type": "text", + "content": "Yuyang Zhao, Zhiwen Yan, Enze Xie, Lanqing Hong, Zhenguo Li, and Gim Hee Lee. *Animate124: Animating one image to 4d dynamic scene.* arXiv preprint arXiv:2311.14603, 2023." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 305, + 505, + 328 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 305, + 505, + 328 + ], + "spans": [ + { + "bbox": [ + 107, + 305, + 505, + 328 + ], + "type": "text", + "content": "Daquan Zhou, Weimin Wang, Hanshu Yan, Weiwei Lv, Yizhe Zhu, and Jiashi Feng. Magicvideo: Efficient video generation with latent diffusion models. arXiv preprint arXiv:2211.11018, 2022." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 335, + 505, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 335, + 505, + 380 + ], + "spans": [ + { + "bbox": [ + 105, + 335, + 505, + 380 + ], + "type": "text", + "content": "Shijie Zhou, Haoran Chang, Sicheng Jiang, Zhiwen Fan, Zehao Zhu, Dejia Xu, Pradyumna Chari, Suya You, Zhangyang Wang, and Achuta Kadambi. Feature 3dgs: Supercharging 3d gaussian splatting to enable distilled feature fields. Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2024a." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 387, + 505, + 422 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 387, + 505, + 422 + ], + "spans": [ + { + "bbox": [ + 107, + 387, + 505, + 422 + ], + "type": "text", + "content": "Shijie Zhou, Zhiwen Fan, Dejia Xu, Haoran Chang, Pradyumna Chari, Tejas Bharadwaj, Suya You, Zhangyang Wang, and Achuta Kadambi. Dreamscene360: Unconstrained text-to-3d scene generation with panoramic gaussian splatting. European Conference on Computer Vision, 2024b." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 427, + 505, + 462 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 427, + 505, + 462 + ], + "spans": [ + { + "bbox": [ + 107, + 427, + 505, + 462 + ], + "type": "text", + "content": "Xiaoyu Zhou, Xingjian Ran, Yajiao Xiong, Jinlin He, Zhiwei Lin, Yongtao Wang, Deqing Sun, and Ming-Hsuan Yang. 
Gala3d: Towards text-to-3d complex scene generation via layout-guided generative gaussian splatting. International conference on machine learning, 2024c." + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 310, + 760 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 183, + 93 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 183, + 93 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 183, + 93 + ], + "type": "text", + "content": "A APPENDIX" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 106, + 504, + 163 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 106, + 504, + 163 + ], + "spans": [ + { + "bbox": [ + 104, + 106, + 504, + 163 + ], + "type": "text", + "content": "Due to space constraints in the main draft, we include supplementary details and experimental results in the appendix. Specifically, in Sec. B, we provide details about the acquisition process for the static panoramas used in our experiments. In Sec. C, we offer further explanation of the implementation for both the animation and lifting phases. Finally, in Sec. D, we describe the experimental setup and present additional results." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 180, + 283, + 192 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 180, + 283, + 192 + ], + "spans": [ + { + "bbox": [ + 105, + 180, + 283, + 192 + ], + "type": "text", + "content": "B ACQUISITION OF PANORAMAS" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 205, + 506, + 262 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 205, + 506, + 262 + ], + "spans": [ + { + "bbox": [ + 104, + 205, + 506, + 262 + ], + "type": "text", + "content": "The static panoramas used in the dataset of the main draft are generated by a text-to-panorama diffusion model, fine-tuned from stable diffusion (Rombach et al., 2022) on SUN360. Similar to (Yang et al., 2024; Feng et al., 2023), this model follows three steps: circular blending, superresolution, and refinement. The panoramas are initially at a resolution of " + }, + { + "bbox": [ + 104, + 205, + 506, + 262 + ], + "type": "inline_equation", + "content": "6144 \\times 3072" + }, + { + "bbox": [ + 104, + 205, + 506, + 262 + ], + "type": "text", + "content": " and then down-sampled to " + }, + { + "bbox": [ + 104, + 205, + 506, + 262 + ], + "type": "inline_equation", + "content": "4096 \\times 2048" + }, + { + "bbox": [ + 104, + 205, + 506, + 262 + ], + "type": "text", + "content": " using the bi-linear interpolation." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 279, + 269, + 291 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 279, + 269, + 291 + ], + "spans": [ + { + "bbox": [ + 105, + 279, + 269, + 291 + ], + "type": "text", + "content": "C IMPLEMENTATION DETAILS" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 304, + 504, + 327 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 304, + 504, + 327 + ], + "spans": [ + { + "bbox": [ + 104, + 304, + 504, + 327 + ], + "type": "text", + "content": "In this section, we introduce the implementation details of the panoramic animator and the 4D lifting procedure." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 341, + 505, + 413 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 341, + 505, + 413 + ], + "spans": [ + { + "bbox": [ + 104, + 341, + 505, + 413 + ], + "type": "text", + "content": "Implementation of Spherical Representing For the spherical representation, the continuous spherical mapping " + }, + { + "bbox": [ + 104, + 341, + 505, + 413 + ], + "type": "inline_equation", + "content": "S_I: \\mathbb{S}^2 \\to \\mathbb{R}^C" + }, + { + "bbox": [ + 104, + 341, + 505, + 413 + ], + "type": "text", + "content": " is instantiate as discrete point set " + }, + { + "bbox": [ + 104, + 341, + 505, + 413 + ], + "type": "inline_equation", + "content": "\\mathcal{P} = \\{p_i\\}" + }, + { + "bbox": [ + 104, + 341, + 505, + 413 + ], + "type": "text", + "content": ", which is uniformly sampled from the sphere " + }, + { + "bbox": [ + 104, + 341, + 505, + 413 + ], + "type": "inline_equation", + "content": "S_I" + }, + { + "bbox": [ + 104, + 341, + 505, + 413 + ], + "type": "text", + "content": ". 
We first initialize a icosahedron with 20 triangle faces " + }, + { + "bbox": [ + 104, + 341, + 505, + 413 + ], + "type": "inline_equation", + "content": "\\{f_i | i = 1, \\dots, 20\\}" + }, + { + "bbox": [ + 104, + 341, + 505, + 413 + ], + "type": "text", + "content": " to approximate a real sphere " + }, + { + "bbox": [ + 104, + 341, + 505, + 413 + ], + "type": "inline_equation", + "content": "\\mathbb{S}^2" + }, + { + "bbox": [ + 104, + 341, + 505, + 413 + ], + "type": "text", + "content": ". Then we uniformly sample a point set " + }, + { + "bbox": [ + 104, + 341, + 505, + 413 + ], + "type": "inline_equation", + "content": "P_i" + }, + { + "bbox": [ + 104, + 341, + 505, + 413 + ], + "type": "text", + "content": " on each face " + }, + { + "bbox": [ + 104, + 341, + 505, + 413 + ], + "type": "inline_equation", + "content": "f_i" + }, + { + "bbox": [ + 104, + 341, + 505, + 413 + ], + "type": "text", + "content": " and union all the point sets together as " + }, + { + "bbox": [ + 104, + 341, + 505, + 413 + ], + "type": "inline_equation", + "content": "\\hat{\\mathcal{P}} = \\cup_{i=1}^{20} P_i" + }, + { + "bbox": [ + 104, + 341, + 505, + 413 + ], + "type": "text", + "content": ". 
We then obtain the discrete point set " + }, + { + "bbox": [ + 104, + 341, + 505, + 413 + ], + "type": "inline_equation", + "content": "\\mathcal{P}" + }, + { + "bbox": [ + 104, + 341, + 505, + 413 + ], + "type": "text", + "content": " by projecting " + }, + { + "bbox": [ + 104, + 341, + 505, + 413 + ], + "type": "inline_equation", + "content": "\\hat{\\mathcal{P}}" + }, + { + "bbox": [ + 104, + 341, + 505, + 413 + ], + "type": "text", + "content": " onto the sphere " + }, + { + "bbox": [ + 104, + 341, + 505, + 413 + ], + "type": "inline_equation", + "content": "\\mathbb{S}^2" + }, + { + "bbox": [ + 104, + 341, + 505, + 413 + ], + "type": "text", + "content": " by " + }, + { + "bbox": [ + 104, + 341, + 505, + 413 + ], + "type": "inline_equation", + "content": "\\mathcal{P} = \\{p_i / \\| p_i\\| \\mid p_i \\in \\hat{\\mathcal{P}}\\}" + }, + { + "bbox": [ + 104, + 341, + 505, + 413 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 426, + 506, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 426, + 506, + 548 + ], + "spans": [ + { + "bbox": [ + 104, + 426, + 506, + 548 + ], + "type": "text", + "content": "Panoramic Animation Phase For the Panoramic Animator, we set the video length " + }, + { + "bbox": [ + 104, + 426, + 506, + 548 + ], + "type": "inline_equation", + "content": "L = 14" + }, + { + "bbox": [ + 104, + 426, + 506, + 548 + ], + "type": "text", + "content": ", the channel number " + }, + { + "bbox": [ + 104, + 426, + 506, + 548 + ], + "type": "inline_equation", + "content": "c = 9" + }, + { + "bbox": [ + 104, + 426, + 506, + 548 + ], + "type": "text", + "content": ", the latent code size " + }, + { + "bbox": [ + 104, + 426, + 506, + 548 + ], + "type": "inline_equation", + "content": "(h,w) = \\frac{1}{8} (H,W)" + }, + { + "bbox": [ + 104, + 426, + 506, + 548 + ], + "type": "text", + "content": ", the perspective image size " + }, + { + "bbox": [ + 104, + 426, + 506, + 548 + ], 
+ "type": "inline_equation", + "content": "p_{H} = p_{W} = \\frac{1}{4} W" + }, + { + "bbox": [ + 104, + 426, + 506, + 548 + ], + "type": "text", + "content": ". The sphere is uniformly divided into 20 perspective views, each with " + }, + { + "bbox": [ + 104, + 426, + 506, + 548 + ], + "type": "inline_equation", + "content": "80^{\\circ}" + }, + { + "bbox": [ + 104, + 426, + 506, + 548 + ], + "type": "text", + "content": " FOV. For the denoiser, the max denoising step is 25. For the continuous optimization in Eq. 3, we calculate each latent vector at each point on the sphere by taking the weighted average on the latent vectors of knn points that are projected from the corresponding pixel on the perspective views, the weights are the inversed distances between the sphere point and the projected points. We conduct the spherical denoising for the first 10 denoising steps and then the spherical latent is projected to the equirectangular form and denoised using sliding window, to avoid noises introduced by interpolation. The perspective denoiser is initiated asAnimate-Anything (Dai et al., 2023). The masks required by the denoiser are given by bounding boxes defined by user clicks." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 562, + 506, + 663 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 562, + 506, + 663 + ], + "spans": [ + { + "bbox": [ + 104, + 562, + 506, + 663 + ], + "type": "text", + "content": "Dynamic Panoramic Lifting Phase In the lifting phase, similar to the animation phase, we choose the perspective view number " + }, + { + "bbox": [ + 104, + 562, + 506, + 663 + ], + "type": "inline_equation", + "content": "n = 20" + }, + { + "bbox": [ + 104, + 562, + 506, + 663 + ], + "type": "text", + "content": ", each with " + }, + { + "bbox": [ + 104, + 562, + 506, + 663 + ], + "type": "inline_equation", + "content": "80^{\\circ}" + }, + { + "bbox": [ + 104, + 562, + 506, + 663 + ], + "type": "text", + "content": " FOV. 
Each perspective view has a square shape, " + }, + { + "bbox": [ + 104, + 562, + 506, + 663 + ], + "type": "inline_equation", + "content": "P_{H} = P_{W} = \\frac{1}{4} W" + }, + { + "bbox": [ + 104, + 562, + 506, + 663 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 562, + 506, + 663 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 104, + 562, + 506, + 663 + ], + "type": "text", + "content": " is the width of the original static panorama. In the Spatial-Temporal Geometric Alignment stage, the depth estimator " + }, + { + "bbox": [ + 104, + 562, + 506, + 663 + ], + "type": "inline_equation", + "content": "\\Theta" + }, + { + "bbox": [ + 104, + 562, + 506, + 663 + ], + "type": "text", + "content": " is implemented using MiDaS (Ranftl et al., 2021; Birkl et al., 2023). The depth map from the perspective image is scaled according to the projection of the unit-length ray direction onto the camera orientation " + }, + { + "bbox": [ + 104, + 562, + 506, + 663 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 104, + 562, + 506, + 663 + ], + "type": "text", + "content": ". Formally, if the pre-scaled depth is " + }, + { + "bbox": [ + 104, + 562, + 506, + 663 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 104, + 562, + 506, + 663 + ], + "type": "text", + "content": " at point " + }, + { + "bbox": [ + 104, + 562, + 506, + 663 + ], + "type": "inline_equation", + "content": "p \\in \\hat{\\mathcal{P}}" + }, + { + "bbox": [ + 104, + 562, + 506, + 663 + ], + "type": "text", + "content": " introduced above, the scaled depth should be " + }, + { + "bbox": [ + 104, + 562, + 506, + 663 + ], + "type": "inline_equation", + "content": "d / \\| p \\|" + }, + { + "bbox": [ + 104, + 562, + 506, + 663 + ], + "type": "text", + "content": ". 
Additionally, for scenes without distinct boundaries, such as the sky, depth values for distant elements are assigned a finite value to support optimization." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 677, + 507, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 507, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 507, + 734 + ], + "type": "text", + "content": "**Optimization Details** The hyper-parameters for optimization are set as follows: " + }, + { + "bbox": [ + 104, + 677, + 507, + 734 + ], + "type": "inline_equation", + "content": "\\lambda_{\\mathrm{depth}} = 1, \\lambda_{\\mathrm{scale}} = 0.1, \\lambda_{\\mathrm{shift}} = 0.01" + }, + { + "bbox": [ + 104, + 677, + 507, + 734 + ], + "type": "text", + "content": ". We conduct Spatial-Temporal Geometry Alignment optimization over 3000 iterations, with " + }, + { + "bbox": [ + 104, + 677, + 507, + 734 + ], + "type": "inline_equation", + "content": "\\lambda_{\\mathrm{scale}}" + }, + { + "bbox": [ + 104, + 677, + 507, + 734 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 677, + 507, + 734 + ], + "type": "inline_equation", + "content": "\\lambda_{\\mathrm{shift}}" + }, + { + "bbox": [ + 104, + 677, + 507, + 734 + ], + "type": "text", + "content": " set to zero during the first 1500 iterations. For the 4D representation training stage, Gaussian parameters are optimized over 10000 iterations for each time stamp " + }, + { + "bbox": [ + 104, + 677, + 507, + 734 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 677, + 507, + 734 + ], + "type": "text", + "content": ". 
The hyper-parameters for this stage are defined as " + }, + { + "bbox": [ + 104, + 677, + 507, + 734 + ], + "type": "inline_equation", + "content": "\\lambda_{\\mathrm{rgb}} = 1, \\lambda_{\\mathrm{temporal}} = \\lambda_{\\mathrm{sem}} = \\lambda_{\\mathrm{geo}} = 0.05" + }, + { + "bbox": [ + 104, + 677, + 507, + 734 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "text", + "content": "and the disturbance vector range " + }, + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "text", + "content": " is varied at 0.05, 0.1, and 0.2 during the 5400, 6600, and 9000 iterations, respectively." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 121, + 258, + 134 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 121, + 258, + 134 + ], + "spans": [ + { + "bbox": [ + 105, + 121, + 258, + 134 + ], + "type": "text", + "content": "D EXPERIMENTAL DETAILS" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 147, + 331, + 158 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 147, + 331, + 158 + ], + "spans": [ + { + "bbox": [ + 105, + 147, + 331, + 158 + ], + "type": "text", + "content": "D.1 THE PROCEEDING TIME OF PER-GENERATION" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 167, + 504, + 190 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 167, + 504, + 190 + ], + "spans": [ + { + "bbox": [ + 104, + 167, + 504, + 190 + ], + "type": "text", + "content": "Animating Phase. We provide the time and GPU cost to animate a single video at different resolutions in the Table 3." + } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 156, + 213, + 452, + 271 + ], + "blocks": [ + { + "bbox": [ + 195, + 198, + 414, + 211 + ], + "lines": [ + { + "bbox": [ + 195, + 198, + 414, + 211 + ], + "spans": [ + { + "bbox": [ + 195, + 198, + 414, + 211 + ], + "type": "text", + "content": "Table 3: The Proceeding Time of Animating Phase." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 156, + 213, + 452, + 271 + ], + "lines": [ + { + "bbox": [ + 156, + 213, + 452, + 271 + ], + "spans": [ + { + "bbox": [ + 156, + 213, + 452, + 271 + ], + "type": "table", + "html": "
ResolutionGPU Usage (GB) ↓Time Cost (Minutes/frame) ↓
1024 × 512 (1K)9.380.89
2048 × 1024 (2K)29.312.99
4096 × 2048 (4K)73.4122.31
", + "image_path": "c9f6b0f115148695c0c7a638b20899856b6c4584db93f58feaae63bbc7baf1e4.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 281, + 506, + 301 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 281, + 506, + 301 + ], + "spans": [ + { + "bbox": [ + 104, + 281, + 506, + 301 + ], + "type": "text", + "content": "Lifting Phase. We provide the time and GPU cost to lift a single frame at 4K, 2K, and 1K resolution. The results are shown in the Table 4." + } + ] + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 156, + 327, + 452, + 384 + ], + "blocks": [ + { + "bbox": [ + 203, + 312, + 405, + 323 + ], + "lines": [ + { + "bbox": [ + 203, + 312, + 405, + 323 + ], + "spans": [ + { + "bbox": [ + 203, + 312, + 405, + 323 + ], + "type": "text", + "content": "Table 4: The Proceeding Time of Lifting Phase." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 156, + 327, + 452, + 384 + ], + "lines": [ + { + "bbox": [ + 156, + 327, + 452, + 384 + ], + "spans": [ + { + "bbox": [ + 156, + 327, + 452, + 384 + ], + "type": "table", + "html": "
ResolutionGPU Usage (GB) ↓Time Cost (Minutes/frame) ↓
1024 × 512 (1K)7.5919
2048 × 1024 (2K)12.0722
4096 × 2048 (4K)31.2733
", + "image_path": "24996ed17897e6d2d3a2274b4a58d582d465972915f20b49bed56c1783086932.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 392, + 506, + 415 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 392, + 506, + 415 + ], + "spans": [ + { + "bbox": [ + 104, + 392, + 506, + 415 + ], + "type": "text", + "content": "The computational cost increases rapidly with the resolution, making 4K generation highly challenging. We will put more engineering effort to accelerate the generation pipeline in the future." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 440, + 230, + 451 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 440, + 230, + 451 + ], + "spans": [ + { + "bbox": [ + 105, + 440, + 230, + 451 + ], + "type": "text", + "content": "D.2 USER STUDY DETAILS" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 460, + 292, + 472 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 460, + 292, + 472 + ], + "spans": [ + { + "bbox": [ + 105, + 460, + 292, + 472 + ], + "type": "text", + "content": "D.2.1 USER STUDY FOR VIDEO QUALITY" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 479, + 506, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 479, + 506, + 635 + ], + "spans": [ + { + "bbox": [ + 104, + 479, + 506, + 635 + ], + "type": "text", + "content": "We conducted two user studies, gathering a total of 84 questionnaires from 42 users. For the \"Quality (UC)\" column in Tab. 1, we collected 42 questionnaires, each containing eight questions. Each question asked users to choose the bests video in term of visual quality from the perspective videos provided by different models. The user choice (UC) score of a method is the percentage of times the method's video was selected as the best one, out of a total of 336 questions. 
Thus, the UC scores for all methods sum to " + }, + { + "bbox": [ + 104, + 479, + 506, + 635 + ], + "type": "inline_equation", + "content": "100\\%" + }, + { + "bbox": [ + 104, + 479, + 506, + 635 + ], + "type": "text", + "content": ". For the \"View-Consistency (UA)\" column in Tab. 2, we collected another 42 questionnaires, with each questionnaire containing eight questions. Each question presented two videos from different views, both generated by the same method, and users were asked to determine whether the two videos were view-consistent. The user agreement (UA) score is the percentage of video pairs marked as view-consistent out of all the video pairs generated by the method. The UA scores do not necessarily sum to " + }, + { + "bbox": [ + 104, + 479, + 506, + 635 + ], + "type": "inline_equation", + "content": "100\\%" + }, + { + "bbox": [ + 104, + 479, + 506, + 635 + ], + "type": "text", + "content": ". In the Quality column of Tab. 1, among the 336 questions, users selected 4K4DGen 272 times, 3D-Cin. (circle) 40 times, and 3D-Cin. (zoom-in) 24 times. In the View-consistency column of Tab. 2, 118 out of 168 video pairs generated by \"Our\" were marked as consistent, while 56 out of 168 pairs from \"Animate Pers\" were considered consistent." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 647, + 289, + 658 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 647, + 289, + 658 + ], + "spans": [ + { + "bbox": [ + 105, + 647, + 289, + 658 + ], + "type": "text", + "content": "D.2.2 USER STUDY FOR VIDEO MOTION" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 666, + 504, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 666, + 504, + 700 + ], + "spans": [ + { + "bbox": [ + 104, + 666, + 504, + 700 + ], + "type": "text", + "content": "Since the quantitative evaluation of motion quality remains an open problem in our tasks, we hereby conducted supplemented user studies for the \"4D generation task\" and \"Animating Phase\", which considers the motion's naturalness and amplitude." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 116, + 709, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 709, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 116, + 709, + 506, + 733 + ], + "type": "text", + "content": "- Motion's naturalness: the motion of the generated view should be natural to human's understanding, avoiding abrupt pixel changes across frames." 
+ } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 116, + 82, + 504, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 82, + 504, + 106 + ], + "spans": [ + { + "bbox": [ + 116, + 82, + 504, + 106 + ], + "type": "text", + "content": "- Motion's amplitude: the motion trajectory of the scene's subjects should have adequate and realistic magnitude." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 117, + 506, + 210 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 117, + 506, + 210 + ], + "spans": [ + { + "bbox": [ + 104, + 117, + 506, + 210 + ], + "type": "text", + "content": "The user studies of Amplitude column and the Naturalness column are all conducted in the \"user choice\" (UC) way. For each Amplitude and Naturalness column in Tab. 1 and Tab. 2, we collect 320 questions from 20 participants. Each question contains three videos from the different three methods, users are asked to select the best one or more videos from the provided set that exhibit noticeably greater amplitude (for the Amplitude columns) or superior naturalness (for the Naturalness columns). Since users could select more than one video per question, the UC metric was normalized based on the total number of selections. 
For example, if method A, B, C are selected " + }, + { + "bbox": [ + 104, + 117, + 506, + 210 + ], + "type": "inline_equation", + "content": "n_a" + }, + { + "bbox": [ + 104, + 117, + 506, + 210 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 117, + 506, + 210 + ], + "type": "inline_equation", + "content": "n_b" + }, + { + "bbox": [ + 104, + 117, + 506, + 210 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 117, + 506, + 210 + ], + "type": "inline_equation", + "content": "n_c" + }, + { + "bbox": [ + 104, + 117, + 506, + 210 + ], + "type": "text", + "content": " times, respectively, the UC metric for them should be " + }, + { + "bbox": [ + 104, + 117, + 506, + 210 + ], + "type": "inline_equation", + "content": "\\frac{n_a}{n_a + n_b + n_c}" + }, + { + "bbox": [ + 104, + 117, + 506, + 210 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 117, + 506, + 210 + ], + "type": "inline_equation", + "content": "\\frac{n_b}{n_a + n_b + n_c}" + }, + { + "bbox": [ + 104, + 117, + 506, + 210 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 117, + 506, + 210 + ], + "type": "inline_equation", + "content": "\\frac{n_c}{n_a + n_b + n_c}" + }, + { + "bbox": [ + 104, + 117, + 506, + 210 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 227, + 269, + 239 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 227, + 269, + 239 + ], + "spans": [ + { + "bbox": [ + 105, + 227, + 269, + 239 + ], + "type": "text", + "content": "D.3 MORE QUANTITATIVE RESULTS" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 250, + 504, + 284 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 250, + 504, + 284 + ], + "spans": [ + { + "bbox": [ + 104, + 250, + 504, + 284 + ], + "type": "text", + "content": "We present quantitative results on an additional 32 scenes randomly sampled from WEB360 dataset (Wang et al., 2024b). As shown in the Table 5, 4K4DGen consistently outperforms the baseline methods across the quantitative metrics." + } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 108, + 356, + 500, + 409 + ], + "blocks": [ + { + "bbox": [ + 104, + 299, + 504, + 354 + ], + "lines": [ + { + "bbox": [ + 104, + 299, + 504, + 354 + ], + "spans": [ + { + "bbox": [ + 104, + 299, + 504, + 354 + ], + "type": "text", + "content": "Table 5: Comparison with 3D-Cinemagraphy in WEB360 Dataset. We adopt the FVD (Unterthiner et al., 2019) and KVD (Unterthiner et al., 2018) to evaluate the generated panoramic video, which is the intermediate result from the animating phase. The IQ, IA, and VQ models represent the image quality scorer, image aesthetic scorer, and video quality scorer, respectively, within the Q-Align assessment framework." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 108, + 356, + 500, + 409 + ], + "lines": [ + { + "bbox": [ + 108, + 356, + 500, + 409 + ], + "spans": [ + { + "bbox": [ + 108, + 356, + 500, + 409 + ], + "type": "table", + "html": "
MethodFVD ↓KVD ↓Q-Align (IQ) ↑Q-Align (IA) ↑Q-Align (VQ) ↑
3D-Cinemagraphy (zoom-in)3075.860.650.570.70
3D-Cinemagraphy (circle)3095.720.650.570.70
4K4DGen2181.760.730.640.77
", + "image_path": "51571d01078b8d9c003fe5a8a35a7a5c827ee9dc01008c8b69fc89c9470086f4.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 429, + 343, + 440 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 429, + 343, + 440 + ], + "spans": [ + { + "bbox": [ + 105, + 429, + 343, + 440 + ], + "type": "text", + "content": "D.4 COMPARISONS WITH MORE BASELINE METHODS" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 452, + 504, + 509 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 452, + 504, + 509 + ], + "spans": [ + { + "bbox": [ + 104, + 452, + 504, + 509 + ], + "type": "text", + "content": "To further address your primary concern, we adopt two types of baseline methods: 4D Object Generation and 4D Scene Generation, to compare with the proposed method. Note that the panoramic 4D generation is still underexplored due to the scarcity of annotated data and the lack of well-trained prior models tailored for panorama format. As a result, we find that existing methods cannot achieve similar quality as 4K4DGen in this task." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 525, + 506, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 525, + 506, + 604 + ], + "spans": [ + { + "bbox": [ + 104, + 525, + 506, + 604 + ], + "type": "text", + "content": "4D Object Generation methods. Following your suggestion, we have devoted substantial engineering efforts to adapt the 4D object generation framework 4DGen (Yin et al., 2023) to our camera settings, as shown in the qualitative results in Figure 8 of the revised paper. It demonstrates that recent 4D object generation methods struggle to generate scene-level content due to inherent domain gaps between objects and scenes. 
Our method overwhelmingly outperforms 4DGen in terms of quantitative evaluations (e.g., Image Quality, Image Aesthetics, and Video Quality) and qualitative evaluations." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "text", + "content": "4D Generation Methods for Scene. We also construct two 4D scene generation baselines: (1) We equip LucidDreamer (Liu et al., 2023) with our animator. We follow the authors' setting, using ZoeDepth and its inpainting model (Stable Diffusion) to expand invisible views for each timestamp, and then we use our backbone animator to animate and optimize the Gaussians. (2) We employed a very recent 4D scene generation technique DimensionX (Sun et al., 2024). We use the default configuration, employing its lora (\"orbit left\") model to generate novel views and 3D structures. Since the DimensionX's T-Director is currently unavailable, we leveraged the same backbone animator from our approach to provide temporal guidance for its 4D representation in the 4D generation stage. Compared with existing methods, including 4DGen, LucidDreamer, and DimensionX, our method consistently achieves higher quantitative results, demonstrating the efficacy of the proposed method." 
+ } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 108, + 92, + 500, + 192 + ], + "blocks": [ + { + "bbox": [ + 195, + 79, + 415, + 91 + ], + "lines": [ + { + "bbox": [ + 195, + 79, + 415, + 91 + ], + "spans": [ + { + "bbox": [ + 195, + 79, + 415, + 91 + ], + "type": "text", + "content": "Table 6: Comparison with 4D Generation Methods." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 108, + 92, + 500, + 192 + ], + "lines": [ + { + "bbox": [ + 108, + 92, + 500, + 192 + ], + "spans": [ + { + "bbox": [ + 108, + 92, + 500, + 192 + ], + "type": "table", + "html": "
MethodQ-Align (IQ) ↑Q-Align (IA) ↑Q-Align (VQ) ↑
4DGen (object)0.190.200.29
3D-Cinemagraphy (zoom-in)0.470.380.57
3D-Cinemagraphy (circle)0.480.400.58
LucidDreamer + Our Animator0.440.410.58
DimensionX + Our Animator0.550.420.60
4K4DGen0.660.440.62
", + "image_path": "e0e468c0c92022fe9f85354360e5a8b88f12dbc86ea1fe310eef4ceaf2dc4bb3.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 200, + 263, + 213 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 200, + 263, + 213 + ], + "spans": [ + { + "bbox": [ + 105, + 200, + 263, + 213 + ], + "type": "text", + "content": "D.5 MORE QUALITATIVE RESULTS" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 220, + 504, + 255 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 220, + 504, + 255 + ], + "spans": [ + { + "bbox": [ + 104, + 220, + 504, + 255 + ], + "type": "text", + "content": "We provide additional qualitative results in Figure 7. Furthermore, we highly recommend viewing the video renderings of 4K4DGen and comparisons to baseline models in the supplementary static HTML page for a more comprehensive and immersive experience." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 259, + 506, + 348 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 259, + 506, + 348 + ], + "spans": [ + { + "bbox": [ + 104, + 259, + 506, + 348 + ], + "type": "text", + "content": "We adapt the 4D object generation framework 4DGen (Yin et al., 2023) to our specific settings and present the qualitative results in Figure 8, which indicate that the generated object varies significantly in form from 4K4DGen's scene outputs. We also compare OmniNeRF (Gu et al., 2022)'s optimized geometry with 4K4DGen. The corresponding depth maps are shown in Figure 9. It can be evidently demonstrated that 4K4DGen attains sharper geometric results. We provide the renderings of a lifted 3D scene where a user walked along a street in Figure 10. Notice the roof highlighted by green bounding boxes in (a) and (b). When the user walks nearer and more area of the roof is observed, it implies the necessity of the lifted 3D structure." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 364, + 355, + 376 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 364, + 355, + 376 + ], + "spans": [ + { + "bbox": [ + 105, + 364, + 355, + 376 + ], + "type": "text", + "content": "E ETHICS AND REPRODUCIBILITY STATEMENT" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 388, + 504, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 388, + 504, + 456 + ], + "spans": [ + { + "bbox": [ + 104, + 388, + 504, + 456 + ], + "type": "text", + "content": "Ethics Statement. Our research enables the generation of 4D digital scenes from a single panoramic image, which is advantageous for various applications such as AR/VR, movie production, and video games. This technology distinctly excels in creating high-resolution 4D scenes up to 4K, significantly enhancing user experiences. However, there is potential for misuse in the creation of deceptive content or privacy violations, which contradicts our ethical intentions. These risks can be mitigated through a combination of regulatory and technical strategies, such as watermarking." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 467, + 506, + 522 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 467, + 506, + 522 + ], + "spans": [ + { + "bbox": [ + 104, + 467, + 506, + 522 + ], + "type": "text", + "content": "Reproducibility. We provide sufficient implementation details to reproduce our methodology in Sec. C, including the details of spherical denoiser, panoramic动员, dynamic panoramic lifting, etc. We provide 16 Sec. 4's panoramic and Sec. D.4's 32 panoramic in the revised supplementary material. Furthermore, we will make our panorama datasets and related code publicly available in the future." 
+ } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 115, + 82, + 263, + 158 + ], + "blocks": [ + { + "bbox": [ + 115, + 82, + 263, + 158 + ], + "lines": [ + { + "bbox": [ + 115, + 82, + 263, + 158 + ], + "spans": [ + { + "bbox": [ + 115, + 82, + 263, + 158 + ], + "type": "image", + "image_path": "0a40f2966bb10204d82711fc679b73a0235bc55e5ae9fb77e2a1dfe5ab56a802.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 116, + 165, + 137, + 188 + ], + "blocks": [ + { + "bbox": [ + 116, + 165, + 137, + 188 + ], + "lines": [ + { + "bbox": [ + 116, + 165, + 137, + 188 + ], + "spans": [ + { + "bbox": [ + 116, + 165, + 137, + 188 + ], + "type": "image", + "image_path": "f40af1ce7d0616cc0020f97ae46765c3c24a34977280782e3b0bb0754dfd9d18.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 139, + 170, + 256, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 139, + 170, + 256, + 224 + ], + "spans": [ + { + "bbox": [ + 139, + 170, + 256, + 224 + ], + "type": "text", + "content": "sailing ship, monochrome, digital art, swirls, surreal, sea, waves, reflections, mountain landscape, fantasy, stylized water, 
starry sky, circular frame, high contrast, black and white, maritime" + } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 270, + 82, + 501, + 232 + ], + "blocks": [ + { + "bbox": [ + 270, + 82, + 501, + 232 + ], + "lines": [ + { + "bbox": [ + 270, + 82, + 501, + 232 + ], + "spans": [ + { + "bbox": [ + 270, + 82, + 501, + 232 + ], + "type": "image", + "image_path": "2f78bd81ce1f835f7e562839b310d27c2744ee73133d52fbef88b2f187e5a9c5.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 270, + 236, + 501, + 385 + ], + "blocks": [ + { + "bbox": [ + 270, + 236, + 501, + 385 + ], + "lines": [ + { + "bbox": [ + 270, + 236, + 501, + 385 + ], + "spans": [ + { + "bbox": [ + 270, + 236, + 501, + 385 + ], + "type": "image", + "image_path": "18ada7109d3d544e669a70a6177897c4c85d626f7aabe0d358bcf19f0c1dc8dc.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 115, + 236, + 263, + 312 + ], + "blocks": [ + { + "bbox": [ + 115, + 236, + 263, + 312 + ], + "lines": [ + { + "bbox": [ + 115, + 236, + 263, + 312 + ], + "spans": [ + { + "bbox": [ + 115, + 236, + 263, + 312 + ], + "type": "image", + "image_path": "14e13622f8cc856a369b794637d3b38f163785474b9a50fb4fffb1d97a68ef1e.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 116, + 318, + 138, + 342 + ], + "blocks": [ + { + "bbox": [ + 116, + 318, + 138, + 342 + ], + "lines": [ + { + "bbox": [ + 116, + 318, + 138, + 342 + ], + "spans": [ + { + "bbox": [ + 116, + 318, + 138, + 342 + ], + "type": "image", + "image_path": "f40c35653f24c3151618b91a73b11540db603668bfe6e67ecd7f7744bc08b1e8.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 140, + 325, + 259, + 380 + ], + "type": "text", + "angle": 0, + "lines": 
[ + { + "bbox": [ + 140, + 325, + 259, + 380 + ], + "spans": [ + { + "bbox": [ + 140, + 325, + 259, + 380 + ], + "type": "text", + "content": "fireworks,cityscape,sunset, \npanorama,digital art,vibrant colors, \nfuturistic,urbancelebration, \nskyscrapers,streets,glowing lights, \ndusk,orange sky,purple hues, \nartistic, wide angle,..." + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 270, + 391, + 501, + 539 + ], + "blocks": [ + { + "bbox": [ + 270, + 391, + 501, + 539 + ], + "lines": [ + { + "bbox": [ + 270, + 391, + 501, + 539 + ], + "spans": [ + { + "bbox": [ + 270, + 391, + 501, + 539 + ], + "type": "image", + "image_path": "696e493a04c474fa2502ab4f4e46fa5c75eda7d8c20b9a30dd614aa7dcaf6dbb.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 115, + 391, + 263, + 464 + ], + "blocks": [ + { + "bbox": [ + 115, + 391, + 263, + 464 + ], + "lines": [ + { + "bbox": [ + 115, + 391, + 263, + 464 + ], + "spans": [ + { + "bbox": [ + 115, + 391, + 263, + 464 + ], + "type": "image", + "image_path": "848c36a0be200bda125c168de12710672bb8c770fdbea220396f212a26647302.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 116, + 473, + 138, + 496 + ], + "blocks": [ + { + "bbox": [ + 116, + 473, + 138, + 496 + ], + "lines": [ + { + "bbox": [ + 116, + 473, + 138, + 496 + ], + "spans": [ + { + "bbox": [ + 116, + 473, + 138, + 496 + ], + "type": "image", + "image_path": "65f911643864891071ae676bd8a935ee0975a2ecc0c3058f66f035c60851f418.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 140, + 473, + 259, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 473, + 259, + 536 + ], + "spans": [ + { + "bbox": [ + 140, + 473, + 259, + 536 + ], + "type": "text", + "content": "beach, seascape, waves, sandy 
shore, sailboat, horizon, clouds, blue sky, panoramic view, tranquil scene, digital painting, soft focus, pastel colors, wide-angle, nature, outdoor, no people, calm sea, coastline, vacation mood" + } + ] + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 270, + 544, + 501, + 693 + ], + "blocks": [ + { + "bbox": [ + 270, + 544, + 501, + 693 + ], + "lines": [ + { + "bbox": [ + 270, + 544, + 501, + 693 + ], + "spans": [ + { + "bbox": [ + 270, + 544, + 501, + 693 + ], + "type": "image", + "image_path": "c1f1468cb228205a9a7a6ac57820cfd16524760c002582c8e9a79444b08de287.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 270, + 703, + 504, + 727 + ], + "lines": [ + { + "bbox": [ + 270, + 703, + 504, + 727 + ], + "spans": [ + { + "bbox": [ + 270, + 703, + 504, + 727 + ], + "type": "text", + "content": "core visual results. For each shown case we provide the input and the rendering from two perspective views." + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 115, + 544, + 263, + 619 + ], + "blocks": [ + { + "bbox": [ + 115, + 544, + 263, + 619 + ], + "lines": [ + { + "bbox": [ + 115, + 544, + 263, + 619 + ], + "spans": [ + { + "bbox": [ + 115, + 544, + 263, + 619 + ], + "type": "image", + "image_path": "19b8a33ecc115bbd41bab73d448e941b0fdebac4705c95c4c181aecb0b2fb189.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 116, + 625, + 138, + 647 + ], + "blocks": [ + { + "bbox": [ + 116, + 625, + 138, + 647 + ], + "lines": [ + { + "bbox": [ + 116, + 625, + 138, + 647 + ], + "spans": [ + { + "bbox": [ + 116, + 625, + 138, + 647 + ], + "type": "image", + "image_path": "c5c8bcc7bcc40a97ec0d0f306d0b0a692e920059d449b6891ea777587e23450f.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "bbox": [ 
+ 140, + 628, + 253, + 691 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 628, + 253, + 691 + ], + "spans": [ + { + "bbox": [ + 140, + 628, + 253, + 691 + ], + "type": "text", + "content": "desert, sunset, sand dunes, yucca plants, mountain range, clouds, sky, warm color palette, panoramic view, digital art, serene, tranquil, nature, vast landscape, purple sky, orange hues, twilight, dusk, no people, scenic beauty, subtle shadows" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 129, + 83, + 487, + 350 + ], + "blocks": [ + { + "bbox": [ + 129, + 83, + 487, + 350 + ], + "lines": [ + { + "bbox": [ + 129, + 83, + 487, + 350 + ], + "spans": [ + { + "bbox": [ + 129, + 83, + 487, + 350 + ], + "type": "image", + "image_path": "3acc1b5f70e247918dd60dc5ce63940c29af2725b413a193b523c9fa4c5ced52.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 361, + 504, + 384 + ], + "lines": [ + { + "bbox": [ + 104, + 361, + 504, + 384 + ], + "spans": [ + { + "bbox": [ + 104, + 361, + 504, + 384 + ], + "type": "text", + "content": "Figure 8: Results of 4DGen: 4DGen (Yin et al., 2023) focuses on object generation and struggles to generate scenes." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 130, + 400, + 487, + 696 + ], + "blocks": [ + { + "bbox": [ + 130, + 400, + 487, + 696 + ], + "lines": [ + { + "bbox": [ + 130, + 400, + 487, + 696 + ], + "spans": [ + { + "bbox": [ + 130, + 400, + 487, + 696 + ], + "type": "image", + "image_path": "07f8c28a9484bd95396c954e05fe8de00620d37151228f27440fc31a7dd7cda9.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 700, + 504, + 724 + ], + "lines": [ + { + "bbox": [ + 104, + 700, + 504, + 724 + ], + "spans": [ + { + "bbox": [ + 104, + 700, + 504, + 724 + ], + "type": "text", + "content": "Figure 9: Results of OmniNeRF: The optimized geometry of OmniNeRF (Gu et al., 2022) is not as sharp as 4K4DGen." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 126, + 312, + 485, + 438 + ], + "blocks": [ + { + "bbox": [ + 126, + 312, + 485, + 438 + ], + "lines": [ + { + "bbox": [ + 126, + 312, + 485, + 438 + ], + "spans": [ + { + "bbox": [ + 126, + 312, + 485, + 438 + ], + "type": "image", + "image_path": 
"19b0b584c18fd40008548c602bd31e24bc3bd8fb4ecc80e315ee37e899b2aa5e.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 189, + 438, + 242, + 450 + ], + "lines": [ + { + "bbox": [ + 189, + 438, + 242, + 450 + ], + "spans": [ + { + "bbox": [ + 189, + 438, + 242, + 450 + ], + "type": "text", + "content": "(a) Stand far" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 364, + 438, + 424, + 450 + ], + "lines": [ + { + "bbox": [ + 364, + 438, + 424, + 450 + ], + "spans": [ + { + "bbox": [ + 364, + 438, + 424, + 450 + ], + "type": "text", + "content": "(b) Stand near" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 463, + 506, + 498 + ], + "lines": [ + { + "bbox": [ + 104, + 463, + 506, + 498 + ], + "spans": [ + { + "bbox": [ + 104, + 463, + 506, + 498 + ], + "type": "text", + "content": "Figure 10: Occlusion in True 3D Structure: When the user walks nearer, the more area of the roof (highlighted by the green box) will be observed. It is hard to implement such effect without the lifted 3D structure." 
+ } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "type": "text", + "content": "22" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file