diff --git "a/2025/Unbiased Missing-modality Multimodal Learning/layout.json" "b/2025/Unbiased Missing-modality Multimodal Learning/layout.json" new file mode 100644--- /dev/null +++ "b/2025/Unbiased Missing-modality Multimodal Learning/layout.json" @@ -0,0 +1,11850 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 152, + 103, + 460, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 152, + 103, + 460, + 121 + ], + "spans": [ + { + "bbox": [ + 152, + 103, + 460, + 121 + ], + "type": "text", + "content": "Unbiased Missing-modality Multimodal Learning" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 129, + 141, + 484, + 157 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 141, + 484, + 157 + ], + "spans": [ + { + "bbox": [ + 129, + 141, + 484, + 157 + ], + "type": "text", + "content": "Ruiting Dai" + }, + { + "bbox": [ + 129, + 141, + 484, + 157 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 129, + 141, + 484, + 157 + ], + "type": "text", + "content": ", Chenxi Li" + }, + { + "bbox": [ + 129, + 141, + 484, + 157 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 129, + 141, + 484, + 157 + ], + "type": "text", + "content": ", Yandong Yan" + }, + { + "bbox": [ + 129, + 141, + 484, + 157 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 129, + 141, + 484, + 157 + ], + "type": "text", + "content": ", Lisi Mo" + }, + { + "bbox": [ + 129, + 141, + 484, + 157 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 129, + 141, + 484, + 157 + ], + "type": "text", + "content": ", Ke Qin" + }, + { + "bbox": [ + 129, + 141, + 484, + 157 + ], + "type": "inline_equation", + "content": "^{1,3}" + }, + { + "bbox": [ + 129, + 141, + 484, + 157 + ], + "type": "text", + "content": ", Tao He" + }, + { + "bbox": [ + 129, + 141, + 484, + 157 + ], + "type": "inline_equation", + "content": "^{1,3*}" + } + ] + } + ], + 
"index": 3 + }, + { + "bbox": [ + 162, + 157, + 449, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 162, + 157, + 449, + 185 + ], + "spans": [ + { + "bbox": [ + 162, + 157, + 449, + 185 + ], + "type": "text", + "content": "1University of Electronic Science and Technology of China \n2School of Computer Science, Peking University" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 185, + 505, + 199 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 185, + 505, + 199 + ], + "spans": [ + { + "bbox": [ + 105, + 185, + 505, + 199 + ], + "type": "text", + "content": "3Ubiquitous Intelligence and Trusted Services Key Laboratory of Sichuan Province" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 134, + 201, + 474, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 201, + 474, + 213 + ], + "spans": [ + { + "bbox": [ + 134, + 201, + 474, + 213 + ], + "type": "text", + "content": "{rtdai,qinke,morris} @uestc.edu.cn, chenxi.li@std.uestc.edu.cn," + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 188, + 216, + 422, + 226 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 188, + 216, + 422, + 226 + ], + "spans": [ + { + "bbox": [ + 188, + 216, + 422, + 226 + ], + "type": "text", + "content": "ai_yan@stu.pku.edu.cn, tao.he01@hotmail.com" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 152, + 255, + 200, + 268 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 152, + 255, + 200, + 268 + ], + "spans": [ + { + "bbox": [ + 152, + 255, + 200, + 268 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 55, + 280, + 296, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 280, + 296, + 533 + ], + "spans": [ + { + "bbox": [ + 55, + 280, + 296, + 533 + ], + "type": "text", + "content": "Recovering missing modalities in multimodal learning has recently been approached using 
diffusion models to synthesize absent data conditioned on available modalities. However, existing methods often suffer from modality generation bias: while certain modalities are generated with high fidelity, others—such as video—remain challenging due to intrinsic modality gaps, leading to imbalanced training. To address this issue, we propose " + }, + { + "bbox": [ + 55, + 280, + 296, + 533 + ], + "type": "inline_equation", + "content": "MD^2 N" + }, + { + "bbox": [ + 55, + 280, + 296, + 533 + ], + "type": "text", + "content": " (Multi-stage Duplex Diffusion Network), a novel framework for unbiased missing-modality recovery. " + }, + { + "bbox": [ + 55, + 280, + 296, + 533 + ], + "type": "inline_equation", + "content": "MD^2 N" + }, + { + "bbox": [ + 55, + 280, + 296, + 533 + ], + "type": "text", + "content": " introduces a modality transfer module within a duplex diffusion architecture, enabling bidirectional generation between available and missing modalities through three stages: (1) global structure generation, (2) modality transfer, and (3) local cross-modal refinement. By training with duplex diffusion, both available and missing modalities generate each other in an intersecting manner, effectively achieving a balanced generation state. Extensive experiments demonstrate that " + }, + { + "bbox": [ + 55, + 280, + 296, + 533 + ], + "type": "inline_equation", + "content": "MD^2 N" + }, + { + "bbox": [ + 55, + 280, + 296, + 533 + ], + "type": "text", + "content": " significantly outperforms existing state-of-the-art methods, achieving up to " + }, + { + "bbox": [ + 55, + 280, + 296, + 533 + ], + "type": "inline_equation", + "content": "4\\%" + }, + { + "bbox": [ + 55, + 280, + 296, + 533 + ], + "type": "text", + "content": " improvement over IMDer on the CMU-MOSEI dataset. Project page: here." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 55, + 555, + 135, + 567 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 555, + 135, + 567 + ], + "spans": [ + { + "bbox": [ + 55, + 555, + 135, + 567 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 55, + 574, + 296, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 574, + 296, + 696 + ], + "spans": [ + { + "bbox": [ + 55, + 574, + 296, + 696 + ], + "type": "text", + "content": "Multimodal learning leverages complementary information from heterogeneous data sources such as audio, images, and text [4, 7, 20, 56], achieving impressive success in modeling complex real-world phenomena. Its effectiveness is demonstrated across diverse applications, including visual question answering (VQA) [25] and affective computing [1], etc. Despite this progress, most existing multimodal approaches assume the availability of all modalities during both training and inference. 
This assumption rarely holds in real-world deployments, where data completeness is of" + } + ] + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 315, + 255, + 392, + 350 + ], + "blocks": [ + { + "bbox": [ + 315, + 255, + 392, + 350 + ], + "lines": [ + { + "bbox": [ + 315, + 255, + 392, + 350 + ], + "spans": [ + { + "bbox": [ + 315, + 255, + 392, + 350 + ], + "type": "image", + "image_path": "73123b6c270d93d30585e54b1877bbf7dcfcc2e5b3d0e27909db8bb552351339.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 335, + 350, + 340, + 356 + ], + "lines": [ + { + "bbox": [ + 335, + 350, + 340, + 356 + ], + "spans": [ + { + "bbox": [ + 335, + 350, + 340, + 356 + ], + "type": "text", + "content": "#" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 313, + 366, + 555, + 455 + ], + "lines": [ + { + "bbox": [ + 313, + 366, + 555, + 455 + ], + "spans": [ + { + "bbox": [ + 313, + 366, + 555, + 455 + ], + "type": "text", + "content": "Figure 1. We evaluate state-of-the-art recovery-based incomplete multimodal learning models [3, 40, 50-52] on the MOSEI dataset under severe missing-modality scenarios, where " + }, + { + "bbox": [ + 313, + 366, + 555, + 455 + ], + "type": "inline_equation", + "content": "\\mathbf{L}" + }, + { + "bbox": [ + 313, + 366, + 555, + 455 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 313, + 366, + 555, + 455 + ], + "type": "inline_equation", + "content": "\\mathbf{A}" + }, + { + "bbox": [ + 313, + 366, + 555, + 455 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 313, + 366, + 555, + 455 + ], + "type": "inline_equation", + "content": "\\mathbf{V}" + }, + { + "bbox": [ + 313, + 366, + 555, + 455 + ], + "type": "text", + "content": " denote text, acoustic, and visual modalities. Results show that most models perform unevenly across different missing-modality conditions, especially struggling when video data is missing. 
In contrast, our model achieves more balanced and robust performance across all missing scenarios." + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 392, + 255, + 475, + 350 + ], + "blocks": [ + { + "bbox": [ + 392, + 255, + 475, + 350 + ], + "lines": [ + { + "bbox": [ + 392, + 255, + 475, + 350 + ], + "spans": [ + { + "bbox": [ + 392, + 255, + 475, + 350 + ], + "type": "image", + "image_path": "82323591c5c0a7022cff976401bde4fbfd3b3dc14c58f08fd476fe2583529903.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 398, + 350, + 404, + 356 + ], + "lines": [ + { + "bbox": [ + 398, + 350, + 404, + 356 + ], + "spans": [ + { + "bbox": [ + 398, + 350, + 404, + 356 + ], + "type": "text", + "content": "CT" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 478, + 255, + 554, + 350 + ], + "blocks": [ + { + "bbox": [ + 478, + 255, + 554, + 350 + ], + "lines": [ + { + "bbox": [ + 478, + 255, + 554, + 350 + ], + "spans": [ + { + "bbox": [ + 478, + 255, + 554, + 350 + ], + "type": "image", + "image_path": "b2bf0d8142f41d1e75311bede07d97b725700b09c340d955dcffdf619409fd40.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 478, + 350, + 484, + 356 + ], + "lines": [ + { + "bbox": [ + 478, + 350, + 484, + 356 + ], + "spans": [ + { + "bbox": [ + 478, + 350, + 484, + 356 + ], + "type": "text", + "content": "R" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 483, + 555, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 483, + 555, + 555 + ], + "spans": [ + { + "bbox": [ + 313, + 483, + 555, + 555 + ], + "type": "text", + "content": "ten compromised by practical issues such as sensor failures [32] or communication 
bottlenecks [6]. The resulting partial modality availability introduces a critical challenge: it results in a mismatch between training and testing conditions, fundamentally undermining the robustness and generalizability of multimodal learning systems." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 313, + 558, + 556, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 558, + 556, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 558, + 556, + 715 + ], + "type": "text", + "content": "Current missing modality learning approaches mainly use deep generative frameworks, such as diffusion models [30], to reconstruct absent modalities conditioned on the available ones. While these methods [26, 30] show promising reconstruction results, we observe a critical limitation: modality generation bias [47, 57]. Specifically, there are significant differences in the difficulty of generating different modalities. For example, text can often be synthesized reliably from visual inputs (e.g., images or videos), but generating high-quality images or videos from text remains more challenging, often resulting in substantial quality degradation (see Fig. 1 for empirical results). This bias leads to imbalanced training, where models perform well in" + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 146, + 0, + 494, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 146, + 0, + 494, + 37 + ], + "spans": [ + { + "bbox": [ + 146, + 0, + 494, + 37 + ], + "type": "text", + "content": "This ICCV paper is the Open Access version, provided by the Computer Vision Foundation. 
Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 703, + 145, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 703, + 145, + 713 + ], + "spans": [ + { + "bbox": [ + 67, + 703, + 145, + 713 + ], + "type": "text", + "content": "*Corresponding author." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "24507" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 294, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 294, + 133 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 294, + 133 + ], + "type": "text", + "content": "some missing-modality scenarios but fail in others [14]. We argue that this limitation arises from the assumption that all modality generation tasks are of equal difficulty, ignoring the inherent differences between modalities and their generation complexities." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 134, + 295, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 134, + 295, + 373 + ], + "spans": [ + { + "bbox": [ + 56, + 134, + 295, + 373 + ], + "type": "text", + "content": "To address the challenge of biased missing modality recovery, we pose a question: Can the recovery process be performed in an intersecting manner by integrating a further modality transfer process from missing to available modalities? 
Motivated by this, we propose " + }, + { + "bbox": [ + 56, + 134, + 295, + 373 + ], + "type": "inline_equation", + "content": "\\mathrm{MD}^2\\mathrm{N}" + }, + { + "bbox": [ + 56, + 134, + 295, + 373 + ], + "type": "text", + "content": " (Multi-stage Duplex Diffusion Network), a novel framework that decomposes recovery into three sequential stages: (1) Global structure generation " + }, + { + "bbox": [ + 56, + 134, + 295, + 373 + ], + "type": "inline_equation", + "content": "(0,t_{1}])" + }, + { + "bbox": [ + 56, + 134, + 295, + 373 + ], + "type": "text", + "content": " : The model first reconstructs the coarse global structure of the target modality by leveraging cross-modal information, establishing a stable foundation for subsequent generation. (2) Modality transfer " + }, + { + "bbox": [ + 56, + 134, + 295, + 373 + ], + "type": "inline_equation", + "content": "(t_1,t_2]" + }, + { + "bbox": [ + 56, + 134, + 295, + 373 + ], + "type": "text", + "content": " : We introduce an intersecting transfer strategy that progressively integrates conditional information, enabling mutual knowledge flow between available and missing modalities, ensuring semantic alignment and learning of modality-invariant representations that capture shared characteristics across modalities. (3) Local cross-modal refinement " + }, + { + "bbox": [ + 56, + 134, + 295, + 373 + ], + "type": "inline_equation", + "content": "[t_2,T]" + }, + { + "bbox": [ + 56, + 134, + 295, + 373 + ], + "type": "text", + "content": " : In this stage, the model enhances local details to refine content quality, ensuring recovered data is both realistic and structurally coherent." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 375, + 295, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 375, + 295, + 567 + ], + "spans": [ + { + "bbox": [ + 55, + 375, + 295, + 567 + ], + "type": "text", + "content": "Our framework employs duplex diffusion models, allowing available and missing modalities to generate each other's data in an intersecting manner across all stages. Specifically, we adopt score-based diffusion models [43] as the generative backbone due to their ability to provide direct and flexible control over the reverse diffusion process, which is critical for accurately reconstructing complex modality structures. To further enhance generation stability, we introduce a time-step-based variance function that dynamically adjusts the noise variance at each diffusion step. This design effectively mitigates deviations and fluctuations caused by local perturbations, ensuring the generation process remains stable and coherent throughout. As a result, our method maintains global structural integrity while progressively enhancing fine-grained details, leading to high-quality and semantically aligned modality recovery outputs." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 568, + 271, + 580 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 568, + 271, + 580 + ], + "spans": [ + { + "bbox": [ + 67, + 568, + 271, + 580 + ], + "type": "text", + "content": "In summary, we make the following contributions:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 582, + 295, + 713 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 55, + 582, + 295, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 582, + 295, + 617 + ], + "spans": [ + { + "bbox": [ + 55, + 582, + 295, + 617 + ], + "type": "text", + "content": "- We empirically identify the issue of modality generation bias in recovery-based missing-modality models, which hinders the effectiveness of missing-modality recovery." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 618, + 295, + 653 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 618, + 295, + 653 + ], + "spans": [ + { + "bbox": [ + 55, + 618, + 295, + 653 + ], + "type": "text", + "content": "- We introduce a multi-stage diffusion process that decomposes the recovery task into three stages: global structure generation, modality transfer, and local detail refinement." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 654, + 295, + 701 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 654, + 295, + 701 + ], + "spans": [ + { + "bbox": [ + 55, + 654, + 295, + 701 + ], + "type": "text", + "content": "- We design a duplex cross-diffusion framework that simultaneously handles the diffusion processes for both available and missing modalities, facilitating the learning of modality-invariant knowledge." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 701, + 295, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 701, + 295, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 701, + 295, + 713 + ], + "type": "text", + "content": "- We conduct extensive experiments on several benchmark" + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 322, + 72, + 553, + 108 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 322, + 72, + 553, + 108 + ], + "spans": [ + { + "bbox": [ + 322, + 72, + 553, + 108 + ], + "type": "text", + "content": "datasets, demonstrating that our model outperforms existing methods, achieving up to a " + }, + { + "bbox": [ + 322, + 72, + 553, + 108 + ], + "type": "inline_equation", + "content": "4\\%" + }, + { + "bbox": [ + 322, + 72, + 553, + 108 + ], + "type": "text", + "content": " improvement over the SOTA models (e.g., IMDer) on the CMU-MOSEI dataset." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 314, + 119, + 406, + 131 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 119, + 406, + 131 + ], + "spans": [ + { + "bbox": [ + 314, + 119, + 406, + 131 + ], + "type": "text", + "content": "2. Related Works" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 139, + 553, + 331 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 139, + 553, + 331 + ], + "spans": [ + { + "bbox": [ + 313, + 139, + 553, + 331 + ], + "type": "text", + "content": "Mulimodal Learning. Recent advancements in multimodal learning have led to significant breakthroughs across cross-modal generation [19, 61, 62], contextual learning [21, 45], and modality fusion [13, 15, 67]. For example, Huang et al. [21] introduced Multimodal Task Vector (MTV), which compress multimodal exemplars into attention heads, circumventing context length constraints and enabling many-shot in-context learning. Zhou et al. 
[66] proposed a causal inference framework that leverages counterfactual reasoning and backdoor adjustment to mitigate modality prior-induced hallucinations, thereby enhancing the robustness of multimodal large language models. Yang et al. [58] developed ContextDIFF, a conditional diffusion model that propagates cross-modal context throughout the diffusion process, significantly improving text-guided visual synthesis and editing tasks." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 331, + 553, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 331, + 553, + 449 + ], + "spans": [ + { + "bbox": [ + 313, + 331, + 553, + 449 + ], + "type": "text", + "content": "In the domain of multimodal sentiment analysis, similar approaches have been widely adopted. For instance, Zhu et al. [68] proposed a BERT and Faster R-CNN-based framework that employs co-attention mechanisms and gating strategies to effectively integrate textual and visual features, leading to improved sentiment classification accuracy. Despite these recent innovations, the inherent challenge of missing modalities remains a critical bottleneck in practical multimodal applications, underscoring the need for further research to address this limitation." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 450, + 555, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 450, + 555, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 450, + 555, + 713 + ], + "type": "text", + "content": "Incomplete Multimodal Learning. Early research on missing modality recovery primarily focused on two approaches: removing incomplete samples [38] and recovering missing data [23, 55]. For example, FitRec [38] utilizes multimodal data during both training and prediction but fails to handle incomplete samples, leading to data depletion and model overfitting. 
Traditional imputation methods, which typically generate the absent modalities [7, 34, 39], encounter significant limitations when consecutive features are missing. Recently, deep learning-based methods, such as autoencoders [17, 18, 28, 37, 42] and Generative Adversarial Networks (GANs) [7, 39], have been applied to restore missing modalities in incomplete multimodal learning scenarios. However, these generative approaches often introduce additional noise, particularly when the number of modalities is large and sample completeness is low [11, 49]. More recently, diffusion models have been employed for modality recovery [12, 16, 41, 52]. For instance, IMDER [52] utilizes a diffusion model to restore missing modalities from Gaussian noise. Nevertheless, these methods consistently overlook a critical issue: modality generation bias, which hinders the quality and reliability of recovered data." + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "24508" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 296, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 296, + 300 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 296, + 300 + ], + "type": "text", + "content": "Diffusion Probabilistic Models. Diffusion models have achieved remarkable progress across various domains, including image restoration, 3D generation, and multimodal learning. For image restoration, Chan et al. [8] introduced the Dynamic Regulation Diffusion Anchoring mechanism (DRDA) to mitigate artifacts and color biases in low-light enhancement, while Li et al. 
[29] employed decoupled probabilistic modeling with uncertainty-guided attention to achieve high-quality reconstruction of complex textures. In 3D generation, Liu et al. [33] utilized 3D point cloud diffusion for modernizing traditional cultural elements, and Jo et al. [24] addressed semantic loss in text generation via statistical manifold mapping. In multimodal learning, diffusion models have been primarily applied to missing modality recovery. For example, Kebaili et al. [27] proposed an adaptive multimodal missing data completion framework that integrates an image-frequency fusion network (IFFN) with diffusion models to significantly improve medical image segmentation accuracy." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 309, + 185, + 323 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 309, + 185, + 323 + ], + "spans": [ + { + "bbox": [ + 55, + 309, + 185, + 323 + ], + "type": "text", + "content": "3. The Proposed Method" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 329, + 296, + 389 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 329, + 296, + 389 + ], + "spans": [ + { + "bbox": [ + 55, + 329, + 296, + 389 + ], + "type": "text", + "content": "In this section, we detail our proposed method, covering the diffusion model preliminaries (Sec. 3.1), overall framework (Sec. 3.2), multimodal feature extractor (Sec. 3.3), duplex multi-stage diffused network (Sec. 3.3), and multimodal fusion (Sec. 3.4) for downstream tasks." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 396, + 141, + 407 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 396, + 141, + 407 + ], + "spans": [ + { + "bbox": [ + 55, + 396, + 141, + 407 + ], + "type": "text", + "content": "3.1. 
Preliminaries" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 413, + 296, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 413, + 296, + 521 + ], + "spans": [ + { + "bbox": [ + 55, + 413, + 296, + 521 + ], + "type": "text", + "content": "Variance-preserving Diffusion Models (VPDM). In this work, we adopt the variance-preserving diffusion model [48] as the generator because it was demonstrated that the noise at each step is controlled to ensure stable and effective missing-modality data recovery. Specifically, VPDM is discretized the variance-preserving stochastic differential equation (VP-SDE) [44] with the Euler-Maruyama technique [35] and incorporates a time-step variance function " + }, + { + "bbox": [ + 55, + 413, + 296, + 521 + ], + "type": "inline_equation", + "content": "\\beta(t)" + }, + { + "bbox": [ + 55, + 413, + 296, + 521 + ], + "type": "text", + "content": " for dynamic noise adjustment as:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 527, + 295, + 543 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 527, + 295, + 543 + ], + "spans": [ + { + "bbox": [ + 69, + 527, + 295, + 543 + ], + "type": "interline_equation", + "content": "\\mathbf {x} _ {t} = \\sqrt {1 - \\beta_ {t}} \\mathbf {x} _ {t - 1} + \\sqrt {\\beta_ {t}} \\epsilon_ {t - 1}, t = 1, 2, \\dots , T, (1)", + "image_path": "6f7e66e6c6ac552fe11e82febbe15862da69a7ec4db34fd09b3594484c344d7a.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 548, + 263, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 548, + 263, + 559 + ], + "spans": [ + { + "bbox": [ + 55, + 548, + 263, + 559 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 55, + 548, + 263, + 559 + ], + "type": "inline_equation", + "content": "\\epsilon_{t - 1}" + }, + { + "bbox": [ + 55, + 548, + 263, + 559 + ], + "type": "text", + "content": " represents independent Gaussian noise." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 560, + 296, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 560, + 296, + 597 + ], + "spans": [ + { + "bbox": [ + 55, + 560, + 296, + 597 + ], + "type": "text", + "content": "Forward Processing. Following the standard Stochastic Differential Equation (SDE) [2, 43], the VP-SDE uses a variance-preserving mechanism to perturb the data " + }, + { + "bbox": [ + 55, + 560, + 296, + 597 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 55, + 560, + 296, + 597 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 86, + 601, + 295, + 625 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 601, + 295, + 625 + ], + "spans": [ + { + "bbox": [ + 86, + 601, + 295, + 625 + ], + "type": "interline_equation", + "content": "\\mathrm {d} \\mathbf {x} = - \\frac {1}{2} \\beta (t) \\mathbf {x} \\mathrm {d} t + \\sqrt {\\beta (t)} \\mathrm {d} \\mathbf {w}, t \\in [ 0, T ], \\tag {2}", + "image_path": "a180b5f20ca4d74ec5a3b430e6e8c318a3850447fccb541fcac9b89d3c20d9cd.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 55, + 630, + 295, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 630, + 295, + 654 + ], + "spans": [ + { + "bbox": [ + 55, + 630, + 295, + 654 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 55, + 630, + 295, + 654 + ], + "type": "inline_equation", + "content": "\\beta (t)" + }, + { + "bbox": [ + 55, + 630, + 295, + 654 + ], + "type": "text", + "content": " is a time-dependent variance function controlling noise intensity and " + }, + { + "bbox": [ + 55, + 630, + 295, + 654 + ], + "type": "inline_equation", + "content": "\\mathbf{w}" + }, + { + "bbox": [ + 55, + 630, + 295, + 654 + ], + "type": "text", + "content": " denotes a Wiener process [64]." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 55, + 654, + 295, + 678 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 654, + 295, + 678 + ], + "spans": [ + { + "bbox": [ + 55, + 654, + 295, + 678 + ], + "type": "text", + "content": "Reverse Processing. As [5, 36, 48], the reverse-time VP-SDE for sample generation during the recovery process is:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 61, + 693, + 296, + 717 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 693, + 296, + 717 + ], + "spans": [ + { + "bbox": [ + 61, + 693, + 296, + 717 + ], + "type": "interline_equation", + "content": "\\mathrm {d} \\mathbf {x} = - \\frac {1}{2} \\beta (t) (\\mathbf {x} \\mathrm {d} t - \\nabla_ {\\mathbf {x}} \\log p _ {t} (\\mathbf {x}) \\mathrm {d} t) + \\sqrt {\\beta (t)} \\mathrm {d} \\bar {\\mathbf {w}} \\tag {3}", + "image_path": "636007fa2977e11e81f4953c5971b60a285ddf3994b15da6ebf64bb9f0832d06.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 72, + 553, + 96 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 553, + 96 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 553, + 96 + ], + "type": "text", + "content": "For discrete time steps " + }, + { + "bbox": [ + 313, + 72, + 553, + 96 + ], + "type": "inline_equation", + "content": "t \\in 1,2,\\dots,T" + }, + { + "bbox": [ + 313, + 72, + 553, + 96 + ], + "type": "text", + "content": ", the reverse process can be represented as an iterative update as:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 340, + 102, + 553, + 128 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 340, + 102, + 553, + 128 + ], + "spans": [ + { + "bbox": [ + 340, + 102, + 553, + 128 + ], + "type": "interline_equation", + "content": "\\mathbf {x} _ {t - 1} = \\frac {1}{\\sqrt {1 - \\beta_ {t}}} \\left(\\mathbf {x} _ {t} + \\beta_ {t} s _ {\\theta} \\left(\\mathbf {x} _ {t}, 
t\\right)\\right) + \\sqrt {\\beta_ {t}} \\epsilon_ {t} \\tag {4}", + "image_path": "e23b6b3aa905c080274a08a1e8841c56256640bfba8e9e7bfb52633b485e0173.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 134, + 553, + 157 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 134, + 553, + 157 + ], + "spans": [ + { + "bbox": [ + 313, + 134, + 553, + 157 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 313, + 134, + 553, + 157 + ], + "type": "inline_equation", + "content": "s_{\\theta}(\\mathbf{x}_t,t)" + }, + { + "bbox": [ + 313, + 134, + 553, + 157 + ], + "type": "text", + "content": " serves as an approximation of " + }, + { + "bbox": [ + 313, + 134, + 553, + 157 + ], + "type": "inline_equation", + "content": "\\nabla_{\\mathbf{x}}\\log p_t(\\mathbf{x})" + }, + { + "bbox": [ + 313, + 134, + 553, + 157 + ], + "type": "text", + "content": " using the score network " + }, + { + "bbox": [ + 313, + 134, + 553, + 157 + ], + "type": "inline_equation", + "content": "s_{\\theta}" + }, + { + "bbox": [ + 313, + 134, + 553, + 157 + ], + "type": "text", + "content": " [36, 44, 54]." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 158, + 555, + 254 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 158, + 555, + 254 + ], + "spans": [ + { + "bbox": [ + 313, + 158, + 555, + 254 + ], + "type": "text", + "content": "Score-matching Loss Function. To optimize the score-matching loss and the score network " + }, + { + "bbox": [ + 313, + 158, + 555, + 254 + ], + "type": "inline_equation", + "content": "s_{\\theta}" + }, + { + "bbox": [ + 313, + 158, + 555, + 254 + ], + "type": "text", + "content": ", we leverage the transition kernel " + }, + { + "bbox": [ + 313, + 158, + 555, + 254 + ], + "type": "inline_equation", + "content": "p_{0t}(\\mathbf{x}(t) | \\mathbf{x}(0))" + }, + { + "bbox": [ + 313, + 158, + 555, + 254 + ], + "type": "text", + "content": " in the VP-SDE. 
This kernel approximates the conditional distribution of the state " + }, + { + "bbox": [ + 313, + 158, + 555, + 254 + ], + "type": "inline_equation", + "content": "\mathbf{x}(t)" + }, + { + "bbox": [ + 313, + 158, + 555, + 254 + ], + "type": "text", + "content": " at any time " + }, + { + "bbox": [ + 313, + 158, + 555, + 254 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 313, + 158, + 555, + 254 + ], + "type": "text", + "content": ", given the initial state " + }, + { + "bbox": [ + 313, + 158, + 555, + 254 + ], + "type": "inline_equation", + "content": "\mathbf{x}(0)" + }, + { + "bbox": [ + 313, + 158, + 555, + 254 + ], + "type": "text", + "content": ". Specifically, the transition kernel follows a Gaussian distribution, with both its mean and covariance determined by the initial state and the cumulative noise function over time. The equation is as:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 326, + 261, + 553, + 275 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 261, + 553, + 275 + ], + "spans": [ + { + "bbox": [ + 326, + 261, + 553, + 275 + ], + "type": "interline_equation", + "content": "p _ {0 t} (\mathbf {x} (t) | \mathbf {x} (0)) = \mathcal {N} (\mathbf {x} (t); \mathbf {x} (0) \varphi (t), \mathbf {I} (1 - \varphi (t))), \tag {5}", + "image_path": "d0d61ba0f8ec15e3ddd38b361a6360b33ed9ce848041101711182b1b18eea992.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 281, + 554, + 342 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 281, + 554, + 342 + ], + "spans": [ + { + "bbox": [ + 313, + 281, + 554, + 342 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 313, + 281, + 554, + 342 + ], + "type": "inline_equation", + "content": "\varphi(t) = e^{-\frac{1}{2}\int_0^t\beta(s)\mathrm{d}s}" + }, + { + "bbox": [ + 313, + 281, + 554, + 342 + ], + "type": "text", + "content": " is a decay factor that controls the 
diffusion process up to time " + }, + { + "bbox": [ + 313, + 281, + 554, + 342 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 313, + 281, + 554, + 342 + ], + "type": "text", + "content": ". The term " + }, + { + "bbox": [ + 313, + 281, + 554, + 342 + ], + "type": "inline_equation", + "content": "\\mathbf{x}(0)\\varphi(t)" + }, + { + "bbox": [ + 313, + 281, + 554, + 342 + ], + "type": "text", + "content": " represents the gradual decay of the signal over time, and the covariance " + }, + { + "bbox": [ + 313, + 281, + 554, + 342 + ], + "type": "inline_equation", + "content": "\\mathbf{I} - \\mathbf{I}\\varphi(t)" + }, + { + "bbox": [ + 313, + 281, + 554, + 342 + ], + "type": "text", + "content": " reflects the cumulative noise introduced during the diffusion process." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 313, + 342, + 553, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 342, + 553, + 365 + ], + "spans": [ + { + "bbox": [ + 313, + 342, + 553, + 365 + ], + "type": "text", + "content": "Using this, the optimization of the score-matching loss is formulated as follows:" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 321, + 371, + 553, + 407 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 321, + 371, + 553, + 407 + ], + "spans": [ + { + "bbox": [ + 321, + 371, + 553, + 407 + ], + "type": "interline_equation", + "content": "\\mathcal {L} = \\mathbb {E} _ {\\mathbf {x}, \\epsilon \\sim \\mathcal {N} (\\mathbf {0}, \\mathbf {I}), t \\sim \\mathcal {U} (0, T)} \\left\\| \\frac {\\epsilon}{\\sqrt {\\lambda (t)}} + s _ {\\theta} (\\mathbf {x} (t), t) \\right\\| _ {2} ^ {2}, \\tag {6}", + "image_path": "addead89b869302f422e1ec1a1866d4834a98b9c6282ea2140d3c39447d2aca9.jpg" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 313, + 412, + 554, + 450 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 412, + 554, + 450 + ], + "spans": [ + { + 
"bbox": [ + 313, + 412, + 554, + 450 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 313, + 412, + 554, + 450 + ], + "type": "inline_equation", + "content": "\\mathcal{U}(0,T)" + }, + { + "bbox": [ + 313, + 412, + 554, + 450 + ], + "type": "text", + "content": " is a uniform distribution over the time interval " + }, + { + "bbox": [ + 313, + 412, + 554, + 450 + ], + "type": "inline_equation", + "content": "[0,T]" + }, + { + "bbox": [ + 313, + 412, + 554, + 450 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 313, + 412, + 554, + 450 + ], + "type": "inline_equation", + "content": "\\lambda (t) = \\mathbf{I} - \\mathbf{I}e^{-\\frac{1}{2}\\int_0^t\\beta (s)\\mathrm{d}s}" + }, + { + "bbox": [ + 313, + 412, + 554, + 450 + ], + "type": "text", + "content": " is a weighting function to balance the loss at different time steps." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 314, + 456, + 430, + 468 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 456, + 430, + 468 + ], + "spans": [ + { + "bbox": [ + 314, + 456, + 430, + 468 + ], + "type": "text", + "content": "3.2. Overall Framework" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 313, + 474, + 555, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 474, + 555, + 714 + ], + "spans": [ + { + "bbox": [ + 313, + 474, + 555, + 714 + ], + "type": "text", + "content": "The overall framework, illustrated in Fig. 2(a), consists of three core components: Multimodal Feature Extractor, Multi-stage Duplex Diffusion Network, and Multimodal Fusion module. 
In the feature extractor, three independent encoders, " + }, + { + "bbox": [ + 313, + 474, + 555, + 714 + ], + "type": "inline_equation", + "content": "\\mathcal{E}_K" + }, + { + "bbox": [ + 313, + 474, + 555, + 714 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 313, + 474, + 555, + 714 + ], + "type": "inline_equation", + "content": "K\\in \\{L,V,A\\}" + }, + { + "bbox": [ + 313, + 474, + 555, + 714 + ], + "type": "text", + "content": ", are utilized to extract features from the text " + }, + { + "bbox": [ + 313, + 474, + 555, + 714 + ], + "type": "inline_equation", + "content": "(L)" + }, + { + "bbox": [ + 313, + 474, + 555, + 714 + ], + "type": "text", + "content": ", vision " + }, + { + "bbox": [ + 313, + 474, + 555, + 714 + ], + "type": "inline_equation", + "content": "(V)" + }, + { + "bbox": [ + 313, + 474, + 555, + 714 + ], + "type": "text", + "content": ", and acoustic " + }, + { + "bbox": [ + 313, + 474, + 555, + 714 + ], + "type": "inline_equation", + "content": "(A)" + }, + { + "bbox": [ + 313, + 474, + 555, + 714 + ], + "type": "text", + "content": " modalities, respectively. The Multi-stage Duplex Diffusion Network adopts a cross-diffusion architecture to generate both available and missing modality data through three sequential stages: (1) Global Structure Generation, where cross-modal information is integrated to establish a coherent global structural framework; (2) Modality Transfer, which is progressively introduced to prevent premature modality dominance and ensure semantic alignment across modalities; and (3) Local Detail Refinement, which enhances fine-grained features to improve the quality and authenticity of the generated samples. Finally, the Multimodal Fusion module consolidates the recovered and available modality representations and utilizes a multimodal Transformer for downstream prediction tasks." 
+ } + ] + } + ], + "index": 22 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "24509" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 61, + 85, + 549, + 213 + ], + "blocks": [ + { + "bbox": [ + 61, + 74, + 126, + 83 + ], + "lines": [ + { + "bbox": [ + 61, + 74, + 126, + 83 + ], + "spans": [ + { + "bbox": [ + 61, + 74, + 126, + 83 + ], + "type": "text", + "content": "(a) Training Process" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 61, + 85, + 549, + 213 + ], + "lines": [ + { + "bbox": [ + 61, + 85, + 549, + 213 + ], + "spans": [ + { + "bbox": [ + 61, + 85, + 549, + 213 + ], + "type": "image", + "image_path": "d59edff0aedc032680201cd039e2aebead77014b075b99cc6b6639902955af38.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 436, + 222, + 546, + 241 + ], + "lines": [ + { + "bbox": [ + 436, + 222, + 546, + 241 + ], + "spans": [ + { + "bbox": [ + 436, + 222, + 546, + 241 + ], + "type": "inline_equation", + "content": "\\mathcal{E}_K" + }, + { + "bbox": [ + 436, + 222, + 546, + 241 + ], + "type": "text", + "content": " Multimodal Feature Extractor " + }, + { + "bbox": [ + 436, + 222, + 546, + 241 + ], + "type": "inline_equation", + "content": "(K\\in \\{L,A,V\\})" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_footnote" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 60, + 232, + 425, + 326 + ], + "blocks": [ + { + "bbox": [ + 63, + 220, + 131, + 229 + ], + "lines": [ + { + "bbox": [ + 63, + 220, + 131, + 229 + ], + "spans": [ + { + "bbox": [ + 63, + 220, + 131, + 229 + ], + "type": "text", + "content": "(b) Inference 
Process" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 60, + 232, + 425, + 326 + ], + "lines": [ + { + "bbox": [ + 60, + 232, + 425, + 326 + ], + "spans": [ + { + "bbox": [ + 60, + 232, + 425, + 326 + ], + "type": "image", + "image_path": "f0cce62de6ce8d4cbb13c53891462a86f87d2e35bd747b743a5074d686b2e65f.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 437, + 245, + 520, + 263 + ], + "lines": [ + { + "bbox": [ + 437, + 245, + 520, + 263 + ], + "spans": [ + { + "bbox": [ + 437, + 245, + 520, + 263 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_K" + }, + { + "bbox": [ + 437, + 245, + 520, + 263 + ], + "type": "text", + "content": " Multimodal Decoder " + }, + { + "bbox": [ + 437, + 245, + 520, + 263 + ], + "type": "inline_equation", + "content": "(K\\in \\{L,A,V\\})" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 437, + 269, + 544, + 279 + ], + "lines": [ + { + "bbox": [ + 437, + 269, + 544, + 279 + ], + "spans": [ + { + "bbox": [ + 437, + 269, + 544, + 279 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{x}}_t" + }, + { + "bbox": [ + 437, + 269, + 544, + 279 + ], + "type": "text", + "content": " The Predicted Latent Feature" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 436, + 284, + 512, + 302 + ], + "lines": [ + { + "bbox": [ + 436, + 284, + 512, + 302 + ], + "spans": [ + { + "bbox": [ + 436, + 284, + 512, + 302 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_t^m" + }, + { + "bbox": [ + 436, + 284, + 512, + 302 + ], + "type": "text", + "content": " Latent Feature of Missing Modality" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 436, + 304, + 517, + 323 + ], + "lines": [ + { + "bbox": [ + 436, + 304, + 517, + 323 + ], + "spans": [ + { + "bbox": [ + 436, + 304, + 517, + 323 + ], + "type": 
"inline_equation", + "content": "\\mathbf{x}_t^a" + }, + { + "bbox": [ + 436, + 304, + 517, + 323 + ], + "type": "text", + "content": " Latent Feature of Available Modality" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 55, + 337, + 555, + 392 + ], + "lines": [ + { + "bbox": [ + 55, + 337, + 555, + 392 + ], + "spans": [ + { + "bbox": [ + 55, + 337, + 555, + 392 + ], + "type": "text", + "content": "Figure 2. Overview of the proposed framework, which comprises three key components: multimodal feature extraction, a multi-stage duplex diffusion network, and multimodal fusion. Independent encoders first extract features from text (L), vision (V), and acoustic (A) modalities. The diffusion network then recovers missing modalities through three stages—global structure generation establishes a consistent cross-modal foundation, modality transferring achieves semantic alignment, and local detail refinement enhances fine-grained features. Finally, a multimodal Transformer fuses the refined representations for downstream prediction." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 412, + 294, + 426 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 412, + 294, + 426 + ], + "spans": [ + { + "bbox": [ + 55, + 412, + 294, + 426 + ], + "type": "text", + "content": "3.3. " + }, + { + "bbox": [ + 55, + 412, + 294, + 426 + ], + "type": "inline_equation", + "content": "\\mathbf{MD}^2\\mathbf{N}" + }, + { + "bbox": [ + 55, + 412, + 294, + 426 + ], + "type": "text", + "content": ": Multi-stage Duplex Diffusion Network" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 55, + 434, + 296, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 434, + 296, + 685 + ], + "spans": [ + { + "bbox": [ + 55, + 434, + 296, + 685 + ], + "type": "text", + "content": "Motivation. 
Existing recovery-based missing modality learning models [51, 52] predominantly utilize conditional generative networks to directly reconstruct missing modality data. However, these approaches often fail to account for the inherent complexities and disparities among modalities, leading to substantial generation biases. We hypothesize that such biases arise from the modality gap, which reflects differences in data characteristics and structural representations across modalities. To address this limitation, we introduce a cross-modality transfer step within the generation process, specifically during the time interval " + }, + { + "bbox": [ + 55, + 434, + 296, + 685 + ], + "type": "inline_equation", + "content": "t \\in (t_1, t_2]" + }, + { + "bbox": [ + 55, + 434, + 296, + 685 + ], + "type": "text", + "content": " of the overall diffusion process " + }, + { + "bbox": [ + 55, + 434, + 296, + 685 + ], + "type": "inline_equation", + "content": "(0, T]" + }, + { + "bbox": [ + 55, + 434, + 296, + 685 + ], + "type": "text", + "content": ". This step is designed to preserve modality-invariant knowledge while effectively transferring modality-variant information. To achieve this, we propose Multi-stage Duplex Diffusion Networks (MD" + }, + { + "bbox": [ + 55, + 434, + 296, + 685 + ], + "type": "inline_equation", + "content": "^2" + }, + { + "bbox": [ + 55, + 434, + 296, + 685 + ], + "type": "text", + "content": "N), where two diffusion models collaborate by reconstructing each other's data through a cross-modality generation module. This collaborative mechanism enables each modality to contribute its unique information, resulting in more accurate, semantically aligned, and unbiased reconstruction of missing modalities." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 55, + 689, + 295, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 689, + 295, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 689, + 295, + 714 + ], + "type": "text", + "content": "Multimodal Feature Extractor. As illustrated in Fig. 2(a), the multimodal feature extractors process data from text (L)," + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 413, + 555, + 556 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 413, + 555, + 556 + ], + "spans": [ + { + "bbox": [ + 313, + 413, + 555, + 556 + ], + "type": "text", + "content": "vision (V), and acoustic (A) modalities. The extraction network, denoted as " + }, + { + "bbox": [ + 313, + 413, + 555, + 556 + ], + "type": "inline_equation", + "content": "\\mathcal{E}_k" + }, + { + "bbox": [ + 313, + 413, + 555, + 556 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 313, + 413, + 555, + 556 + ], + "type": "inline_equation", + "content": "k\\in \\{L,V,A\\}" + }, + { + "bbox": [ + 313, + 413, + 555, + 556 + ], + "type": "text", + "content": ", leverages modality-specific pre-trained encoders: BERT [10] for textual data, Facet [22] for visual inputs, and COVAREP [9] for audio. These encoders transform the raw modality data into a latent space. 
For each input " + }, + { + "bbox": [ + 313, + 413, + 555, + 556 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 313, + 413, + 555, + 556 + ], + "type": "text", + "content": ", the encoders yield representations " + }, + { + "bbox": [ + 313, + 413, + 555, + 556 + ], + "type": "inline_equation", + "content": "\\mathcal{X} = \\{\\mathbf{x}^k\\}" + }, + { + "bbox": [ + 313, + 413, + 555, + 556 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 313, + 413, + 555, + 556 + ], + "type": "inline_equation", + "content": "\\mathbf{x}^k\\in \\mathbb{R}^{L\\times D}" + }, + { + "bbox": [ + 313, + 413, + 555, + 556 + ], + "type": "text", + "content": " (with " + }, + { + "bbox": [ + 313, + 413, + 555, + 556 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 313, + 413, + 555, + 556 + ], + "type": "text", + "content": " denoting the sequence length and " + }, + { + "bbox": [ + 313, + 413, + 555, + 556 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 313, + 413, + 555, + 556 + ], + "type": "text", + "content": " the latent dimensionality). Following [51, 52], each sample is assumed to have at least one available modality feature. Notably, during training, all modalities are complete; missing modality data occur only during the inference [40, 52, 53, 65]." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 557, + 555, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 557, + 555, + 641 + ], + "spans": [ + { + "bbox": [ + 313, + 557, + 555, + 641 + ], + "type": "text", + "content": "Forward Process. As illustrated in Fig. 
2, the duplex diffusion networks dilute both the available data " + }, + { + "bbox": [ + 313, + 557, + 555, + 641 + ], + "type": "inline_equation", + "content": "\\mathbf{x}^a" + }, + { + "bbox": [ + 313, + 557, + 555, + 641 + ], + "type": "text", + "content": " and the missing modality data " + }, + { + "bbox": [ + 313, + 557, + 555, + 641 + ], + "type": "inline_equation", + "content": "\\mathbf{x}^m" + }, + { + "bbox": [ + 313, + 557, + 555, + 641 + ], + "type": "text", + "content": " into Gaussian noise. Importantly, these two diffusion processes are intersectantly connected via the modality transferring process. We denote the output at each time step as " + }, + { + "bbox": [ + 313, + 557, + 555, + 641 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{x}}_t" + }, + { + "bbox": [ + 313, + 557, + 555, + 641 + ], + "type": "text", + "content": ", highlighted in deep blue in Fig. 2. The entire forward process is divided into three time stages:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 642, + 554, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 642, + 554, + 689 + ], + "spans": [ + { + "bbox": [ + 313, + 642, + 554, + 689 + ], + "type": "text", + "content": "(1) Stage 1: Global Structure Generation " + }, + { + "bbox": [ + 313, + 642, + 554, + 689 + ], + "type": "inline_equation", + "content": "(t\\in (0,t_1])" + }, + { + "bbox": [ + 313, + 642, + 554, + 689 + ], + "type": "text", + "content": " In this stage, noise is injected into the features " + }, + { + "bbox": [ + 313, + 642, + 554, + 689 + ], + "type": "inline_equation", + "content": "\\mathbf{x}^m (t)" + }, + { + "bbox": [ + 313, + 642, + 554, + 689 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 642, + 554, + 689 + ], + "type": "inline_equation", + "content": "\\mathbf{x}^a (t)" + }, + { + "bbox": [ + 313, + 642, + 554, + 689 + ], + "type": "text", + "content": ". 
For example, the forward process for " + }, + { + "bbox": [ + 313, + 642, + 554, + 689 + ], + "type": "inline_equation", + "content": "\\mathbf{x}^m (t)" + }, + { + "bbox": [ + 313, + 642, + 554, + 689 + ], + "type": "text", + "content": ", based on Eq. (1), is formulated as:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 364, + 700, + 555, + 715 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 364, + 700, + 555, + 715 + ], + "spans": [ + { + "bbox": [ + 364, + 700, + 555, + 715 + ], + "type": "interline_equation", + "content": "\\tilde {\\mathbf {x}} _ {t} ^ {m} = \\sqrt {1 - \\beta_ {t}} \\mathbf {x} _ {t - 1} ^ {m} + \\sqrt {\\beta_ {t}} \\boldsymbol {\\epsilon} _ {t - 1}. \\qquad (7)", + "image_path": "9346644bb780b6590d0ead867d33770332c2eeb83e5b7be59abae9800763911c.jpg" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "text", + "content": "24510" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 282, + 84 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 282, + 84 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 282, + 84 + ], + "type": "text", + "content": "Similarly, " + }, + { + "bbox": [ + 55, + 72, + 282, + 84 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{x}}_t^a" + }, + { + "bbox": [ + 55, + 72, + 282, + 84 + ], + "type": "text", + "content": " can be obtained through the same process." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 84, + 296, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 84, + 296, + 133 + ], + "spans": [ + { + "bbox": [ + 55, + 84, + 296, + 133 + ], + "type": "text", + "content": "(2) Stage 2: Modality Transfer " + }, + { + "bbox": [ + 55, + 84, + 296, + 133 + ], + "type": "inline_equation", + "content": "(t\\in (t_1,t_2])" + }, + { + "bbox": [ + 55, + 84, + 296, + 133 + ], + "type": "text", + "content": ". In this stage, the latent features " + }, + { + "bbox": [ + 55, + 84, + 296, + 133 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_t^m" + }, + { + "bbox": [ + 55, + 84, + 296, + 133 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 55, + 84, + 296, + 133 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_t^a" + }, + { + "bbox": [ + 55, + 84, + 296, + 133 + ], + "type": "text", + "content": " undergo cross-timestep transferring until " + }, + { + "bbox": [ + 55, + 84, + 296, + 133 + ], + "type": "inline_equation", + "content": "t_2" + }, + { + "bbox": [ + 55, + 84, + 296, + 133 + ], + "type": "text", + "content": ". 
Taking the transfer from " + }, + { + "bbox": [ + 55, + 84, + 296, + 133 + ], + "type": "inline_equation", + "content": "\\mathbf{x}^a\\rightarrow \\mathbf{x}^m" + }, + { + "bbox": [ + 55, + 84, + 296, + 133 + ], + "type": "text", + "content": " as an example, the forward process for " + }, + { + "bbox": [ + 55, + 84, + 296, + 133 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{x}}_t^m" + }, + { + "bbox": [ + 55, + 84, + 296, + 133 + ], + "type": "text", + "content": " is written as:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 68, + 153, + 296, + 168 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 153, + 296, + 168 + ], + "spans": [ + { + "bbox": [ + 68, + 153, + 296, + 168 + ], + "type": "interline_equation", + "content": "\\tilde {\\mathbf {x}} _ {t} ^ {m} = \\sqrt {1 - \\beta_ {t}} \\tilde {\\mathbf {x}} _ {t - 1} ^ {m} + \\sqrt {\\beta_ {t}} \\epsilon_ {t - 1} - \\Phi \\big (\\mathbf {x} _ {0} ^ {m} - \\mathbf {x} _ {0} ^ {a} \\big), (8)", + "image_path": "9ad94db0f4cd052d49a4f72a16388dc9d9e49a0104467d784c8b3c59689717be.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 177, + 296, + 225 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 177, + 296, + 225 + ], + "spans": [ + { + "bbox": [ + 55, + 177, + 296, + 225 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 55, + 177, + 296, + 225 + ], + "type": "inline_equation", + "content": "\\Phi" + }, + { + "bbox": [ + 55, + 177, + 296, + 225 + ], + "type": "text", + "content": " is a coefficient derived by aligning the model with Eq. (1), with its detailed derivation provided in the supplementary materials. The reverse transfer from " + }, + { + "bbox": [ + 55, + 177, + 296, + 225 + ], + "type": "inline_equation", + "content": "\\mathbf{x}^m\\rightarrow \\mathbf{x}^a" + }, + { + "bbox": [ + 55, + 177, + 296, + 225 + ], + "type": "text", + "content": " follows an analogous formulation." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 226, + 296, + 286 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 226, + 296, + 286 + ], + "spans": [ + { + "bbox": [ + 55, + 226, + 296, + 286 + ], + "type": "text", + "content": "(3) Stage 3: Local Cross-Modal Refinement " + }, + { + "bbox": [ + 55, + 226, + 296, + 286 + ], + "type": "inline_equation", + "content": "(t \\in (t_2, T])" + }, + { + "bbox": [ + 55, + 226, + 296, + 286 + ], + "type": "text", + "content": ". In the final stage, the injected noise disrupts the latent features " + }, + { + "bbox": [ + 55, + 226, + 296, + 286 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_t^a" + }, + { + "bbox": [ + 55, + 226, + 296, + 286 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 55, + 226, + 296, + 286 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_t^m" + }, + { + "bbox": [ + 55, + 226, + 296, + 286 + ], + "type": "text", + "content": " into Gaussian noise. For simplicity, we use " + }, + { + "bbox": [ + 55, + 226, + 296, + 286 + ], + "type": "inline_equation", + "content": "*" + }, + { + "bbox": [ + 55, + 226, + 296, + 286 + ], + "type": "text", + "content": " to denote either superscript " + }, + { + "bbox": [ + 55, + 226, + 296, + 286 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 55, + 226, + 296, + 286 + ], + "type": "text", + "content": " or " + }, + { + "bbox": [ + 55, + 226, + 296, + 286 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 55, + 226, + 296, + 286 + ], + "type": "text", + "content": ". 
The forward process in this stage is expressed as:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 295, + 296, + 311 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 295, + 296, + 311 + ], + "spans": [ + { + "bbox": [ + 107, + 295, + 296, + 311 + ], + "type": "interline_equation", + "content": "\\tilde {\\mathbf {x}} _ {t} ^ {*} = \\sqrt {1 - \\beta_ {t}} \\mathbf {x} _ {t - 1} ^ {*} + \\sqrt {\\beta_ {t}} \\epsilon_ {t - 1}. \\qquad (9)", + "image_path": "115472dac9b65adeb885eae103af5fffc632064dce1365f8ca3255b713e180ed.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 320, + 296, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 320, + 296, + 380 + ], + "spans": [ + { + "bbox": [ + 55, + 320, + 296, + 380 + ], + "type": "text", + "content": "Reverse Process. Based on the time-dependent score model " + }, + { + "bbox": [ + 55, + 320, + 296, + 380 + ], + "type": "inline_equation", + "content": "s_{\\theta}" + }, + { + "bbox": [ + 55, + 320, + 296, + 380 + ], + "type": "text", + "content": ", we construct the corresponding reverse-time SDE and numerically simulate it to generate samples from " + }, + { + "bbox": [ + 55, + 320, + 296, + 380 + ], + "type": "inline_equation", + "content": "p_0" + }, + { + "bbox": [ + 55, + 320, + 296, + 380 + ], + "type": "text", + "content": ". Starting from samples " + }, + { + "bbox": [ + 55, + 320, + 296, + 380 + ], + "type": "inline_equation", + "content": "\\mathbf{x}(T) \\sim p_T" + }, + { + "bbox": [ + 55, + 320, + 296, + 380 + ], + "type": "text", + "content": ", the process is reversed to obtain " + }, + { + "bbox": [ + 55, + 320, + 296, + 380 + ], + "type": "inline_equation", + "content": "\\mathbf{x}(0) \\sim p_0" + }, + { + "bbox": [ + 55, + 320, + 296, + 380 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 380, + 296, + 428 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 380, + 296, + 428 + ], + "spans": [ + { + "bbox": [ + 55, + 380, + 296, + 428 + ], + "type": "text", + "content": "As shown in Eq. (7) and Eq. (9), for the time stages " + }, + { + "bbox": [ + 55, + 380, + 296, + 428 + ], + "type": "inline_equation", + "content": "(0, t_1]" + }, + { + "bbox": [ + 55, + 380, + 296, + 428 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 55, + 380, + 296, + 428 + ], + "type": "inline_equation", + "content": "(t_2, T]" + }, + { + "bbox": [ + 55, + 380, + 296, + 428 + ], + "type": "text", + "content": ", the forward latent " + }, + { + "bbox": [ + 55, + 380, + 296, + 428 + ], + "type": "inline_equation", + "content": "\tilde{\mathbf{x}}_t^*" + }, + { + "bbox": [ + 55, + 380, + 296, + 428 + ], + "type": "text", + "content": " aligns with the discretized VP-SDE formulation. Therefore, both stages share the same parameterized reverse iteration, formulated as:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 60, + 437, + 296, + 463 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 437, + 296, + 463 + ], + "spans": [ + { + "bbox": [ + 60, + 437, + 296, + 463 + ], + "type": "interline_equation", + "content": "\tilde {\mathbf {x}} _ {t - 1} ^ {*} = \frac {1}{\sqrt {1 - \beta_ {t}}} \left(\mathbf {x} _ {t} ^ {*} + \beta_ {t} s _ {\theta} ^ {(*)} (\tilde {\mathbf {x}} _ {t} ^ {*}, y, t)\right) + \sqrt {\beta_ {t}} \epsilon_ {t}, \tag {10}", + "image_path": "35eafe374669b9957d72096a7d43fc526f3c6c9c541635995ee896e2853ee2e7.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 55, + 472, + 296, + 522 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 472, + 296, + 522 + ], + "spans": [ + { + "bbox": [ + 55, + 472, + 296, + 522 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 55, + 
472, + 296, + 522 + ], + "type": "inline_equation", + "content": "s_{\\theta}^{(*)}(\\tilde{\\mathbf{x}}_t^*,y,t)" + }, + { + "bbox": [ + 55, + 472, + 296, + 522 + ], + "type": "text", + "content": " is the prediction network estimating " + }, + { + "bbox": [ + 55, + 472, + 296, + 522 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_0^*" + }, + { + "bbox": [ + 55, + 472, + 296, + 522 + ], + "type": "text", + "content": " from the noisy latent feature " + }, + { + "bbox": [ + 55, + 472, + 296, + 522 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{x}}_t^*" + }, + { + "bbox": [ + 55, + 472, + 296, + 522 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 55, + 472, + 296, + 522 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 55, + 472, + 296, + 522 + ], + "type": "text", + "content": " denotes the generated other modalities. Similarly, for the stage " + }, + { + "bbox": [ + 55, + 472, + 296, + 522 + ], + "type": "inline_equation", + "content": "t\\in (t_2,T]" + }, + { + "bbox": [ + 55, + 472, + 296, + 522 + ], + "type": "text", + "content": ", the reverse process is written as:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 60, + 531, + 296, + 556 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 531, + 296, + 556 + ], + "spans": [ + { + "bbox": [ + 60, + 531, + 296, + 556 + ], + "type": "interline_equation", + "content": "\\tilde {\\mathbf {x}} _ {t - 1} ^ {*} = \\frac {1}{\\sqrt {1 - \\beta_ {t}}} \\left(\\mathbf {x} _ {t} ^ {*} + \\beta_ {t} s _ {\\theta} ^ {(*)} (\\tilde {\\mathbf {x}} _ {t} ^ {*}, y, t)\\right) + \\sqrt {\\beta_ {t}} \\epsilon_ {t}. 
\\tag {11}", + "image_path": "10185996bd6ea7c8edb63bbf87e9c3b7297afa5db03bac8e2bce24f7d38de532.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 55, + 565, + 295, + 589 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 565, + 295, + 589 + ], + "spans": [ + { + "bbox": [ + 55, + 565, + 295, + 589 + ], + "type": "text", + "content": "Following Eq. (3), the modality transferring process from " + }, + { + "bbox": [ + 55, + 565, + 295, + 589 + ], + "type": "inline_equation", + "content": "\\mathbf{x}^a" + }, + { + "bbox": [ + 55, + 565, + 295, + 589 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 55, + 565, + 295, + 589 + ], + "type": "inline_equation", + "content": "\\mathbf{x}^m" + }, + { + "bbox": [ + 55, + 565, + 295, + 589 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 55, + 565, + 295, + 589 + ], + "type": "inline_equation", + "content": "t\\in (t_1,t_2]" + }, + { + "bbox": [ + 55, + 565, + 295, + 589 + ], + "type": "text", + "content": " is expressed as:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 55, + 598, + 296, + 632 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 598, + 296, + 632 + ], + "spans": [ + { + "bbox": [ + 55, + 598, + 296, + 632 + ], + "type": "interline_equation", + "content": "\\tilde {\\mathbf {x}} _ {t - 1} ^ {a} = \\frac {1}{\\sqrt {1 - \\beta_ {t}}} \\left(\\tilde {\\mathbf {x}} _ {t} ^ {a} - \\Phi \\left(\\mathbf {x} _ {0} ^ {m} - \\mathbf {x} _ {0} ^ {a}\\right) + \\beta_ {t} s _ {\\theta} ^ {(m)} \\left(\\tilde {\\mathbf {x}} _ {t} ^ {m}, y, t\\right)\\right) + \\sqrt {\\beta_ {t}} \\epsilon_ {t}. 
\\tag {12}", + "image_path": "730a5bb4351ad04637703f261736abd888d89f0e4cff726ca9c77a39485170d1.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 55, + 633, + 296, + 670 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 633, + 296, + 670 + ], + "spans": [ + { + "bbox": [ + 55, + 633, + 296, + 670 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 55, + 633, + 296, + 670 + ], + "type": "inline_equation", + "content": "\\Phi" + }, + { + "bbox": [ + 55, + 633, + 296, + 670 + ], + "type": "text", + "content": " denotes the transfer coefficient derived in the supplementary materials. Conversely, the process from " + }, + { + "bbox": [ + 55, + 633, + 296, + 670 + ], + "type": "inline_equation", + "content": "\\mathbf{x}^m" + }, + { + "bbox": [ + 55, + 633, + 296, + 670 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 55, + 633, + 296, + 670 + ], + "type": "inline_equation", + "content": "\\mathbf{x}^a" + }, + { + "bbox": [ + 55, + 633, + 296, + 670 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 55, + 633, + 296, + 670 + ], + "type": "inline_equation", + "content": "t\\in (t_1,t_2]" + }, + { + "bbox": [ + 55, + 633, + 296, + 670 + ], + "type": "text", + "content": " is formulated as:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 55, + 678, + 296, + 713 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 678, + 296, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 678, + 296, + 713 + ], + "type": "interline_equation", + "content": "\\tilde {\\mathbf {x}} _ {t - 1} ^ {m} = \\frac {1}{\\sqrt {1 - \\beta_ {t}}} \\left(\\tilde {\\mathbf {x}} _ {t} ^ {m} - \\Phi \\left(\\mathbf {x} _ {0} ^ {a} - \\mathbf {x} _ {0} ^ {m}\\right) + \\beta_ {t} s _ {\\theta} ^ {(a)} \\left(\\tilde {\\mathbf {x}} _ {t} ^ {a}, y, t\\right)\\right) + \\sqrt {\\beta_ {t}} \\epsilon_ {t}. 
\\tag {13}", + "image_path": "ebc5eb49666ea82482ba2a76a8fdadcad4ec85420e2558c071791d6e13840cef.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 72, + 555, + 137 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 555, + 137 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 555, + 137 + ], + "type": "text", + "content": "Diffusion Optimization Objective. As shown in Fig. 2(a), the optimization objectives for " + }, + { + "bbox": [ + 313, + 72, + 555, + 137 + ], + "type": "inline_equation", + "content": "s_{\\theta}^{(a)}(\\tilde{\\mathbf{x}}_t^a, y, t)" + }, + { + "bbox": [ + 313, + 72, + 555, + 137 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 72, + 555, + 137 + ], + "type": "inline_equation", + "content": "s_{\\theta}^{(m)}(\\tilde{\\mathbf{x}}_t^m, y, t)" + }, + { + "bbox": [ + 313, + 72, + 555, + 137 + ], + "type": "text", + "content": " follow the formulation in Eq. (6), and are denoted as " + }, + { + "bbox": [ + 313, + 72, + 555, + 137 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_a" + }, + { + "bbox": [ + 313, + 72, + 555, + 137 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 72, + 555, + 137 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_m" + }, + { + "bbox": [ + 313, + 72, + 555, + 137 + ], + "type": "text", + "content": ", respectively. 
The overall optimization objective " + }, + { + "bbox": [ + 313, + 72, + 555, + 137 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{score}}" + }, + { + "bbox": [ + 313, + 72, + 555, + 137 + ], + "type": "text", + "content": " is defined as:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 394, + 146, + 555, + 159 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 394, + 146, + 555, + 159 + ], + "spans": [ + { + "bbox": [ + 394, + 146, + 555, + 159 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {s c o r e}} = \\mathcal {L} _ {a} + \\mathcal {L} _ {m}, \\tag {14}", + "image_path": "67fc62ab43e71432b3db4444193ef3fe5b960f8232dd9d830d7f19cb1f416828.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 169, + 441, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 169, + 441, + 180 + ], + "spans": [ + { + "bbox": [ + 313, + 169, + 441, + 180 + ], + "type": "text", + "content": "where each " + }, + { + "bbox": [ + 313, + 169, + 441, + 180 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_*" + }, + { + "bbox": [ + 313, + 169, + 441, + 180 + ], + "type": "text", + "content": " is computed as:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 317, + 190, + 553, + 234 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 190, + 553, + 234 + ], + "spans": [ + { + "bbox": [ + 317, + 190, + 553, + 234 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {*} = \\mathbb {E} _ {\\mathbf {x} ^ {*}, \\epsilon \\sim \\mathcal {N} (\\mathbf {0}, \\mathbf {I}), t \\sim \\mathcal {U} (t _ {1}, T)} \\left\\| s _ {\\theta} ^ {(*)} \\left(\\tilde {\\mathbf {x}} _ {t} ^ {*}, y, t\\right) + \\frac {\\epsilon}{\\sqrt {\\lambda (t)}} \\right\\| _ {2} ^ {2}, \\tag {15}", + "image_path": "58e2d069eb93bdcd64fc6d186998c9d8037c3328ee9634934569dfe4ce3bf3ae.jpg" + } + ] + } + ], + "index": 18 + }, + { + 
"bbox": [ + 313, + 235, + 553, + 270 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 235, + 553, + 270 + ], + "spans": [ + { + "bbox": [ + 313, + 235, + 553, + 270 + ], + "type": "text", + "content": "and " + }, + { + "bbox": [ + 313, + 235, + 553, + 270 + ], + "type": "inline_equation", + "content": "*" + }, + { + "bbox": [ + 313, + 235, + 553, + 270 + ], + "type": "text", + "content": " denotes either " + }, + { + "bbox": [ + 313, + 235, + 553, + 270 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 313, + 235, + 553, + 270 + ], + "type": "text", + "content": " or " + }, + { + "bbox": [ + 313, + 235, + 553, + 270 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 313, + 235, + 553, + 270 + ], + "type": "text", + "content": ". Here, " + }, + { + "bbox": [ + 313, + 235, + 553, + 270 + ], + "type": "inline_equation", + "content": "\\lambda(t)" + }, + { + "bbox": [ + 313, + 235, + 553, + 270 + ], + "type": "text", + "content": " is the noise scaling factor defined in Eq. (6), and " + }, + { + "bbox": [ + 313, + 235, + 553, + 270 + ], + "type": "inline_equation", + "content": "\\mathcal{U}(t_1, T)" + }, + { + "bbox": [ + 313, + 235, + 553, + 270 + ], + "type": "text", + "content": " denotes the uniform distribution over the diffusion time interval." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 313, + 270, + 553, + 306 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 270, + 553, + 306 + ], + "spans": [ + { + "bbox": [ + 313, + 270, + 553, + 306 + ], + "type": "text", + "content": "Besides, we deploy a decoder to reconstruct the generated data. 
Thus, we leverage reconstruction loss to optimize the generated data by:" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 358, + 315, + 553, + 329 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 358, + 315, + 553, + 329 + ], + "spans": [ + { + "bbox": [ + 358, + 315, + 553, + 329 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\mathrm {d e c}} = \\left\\| \\hat {\\mathbf {x}} ^ {m} - \\mathbf {x} ^ {m} \\right\\| _ {2} ^ {2} + \\left\\| \\hat {\\mathbf {x}} ^ {a} - \\mathbf {x} ^ {a} \\right\\| _ {2} ^ {2}. \\tag {16}", + "image_path": "f45e741b1633dedd3007cb81b3461b6ba7040424c5ab3d445a1237d803729b4a.jpg" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 313, + 339, + 553, + 363 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 339, + 553, + 363 + ], + "spans": [ + { + "bbox": [ + 313, + 339, + 553, + 363 + ], + "type": "text", + "content": "By combining " + }, + { + "bbox": [ + 313, + 339, + 553, + 363 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{score}}" + }, + { + "bbox": [ + 313, + 339, + 553, + 363 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 339, + 553, + 363 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{dec}}" + }, + { + "bbox": [ + 313, + 339, + 553, + 363 + ], + "type": "text", + "content": ", the objective of our multimodal recovery diffused network is " + }, + { + "bbox": [ + 313, + 339, + 553, + 363 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{rec}} = \\mathcal{L}_{\\mathrm{score}} + \\mathcal{L}_{\\mathrm{dec}}" + }, + { + "bbox": [ + 313, + 339, + 553, + 363 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 313, + 363, + 555, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 363, + 555, + 604 + ], + "spans": [ + { + "bbox": [ + 313, + 363, + 555, + 604 + ], + "type": "text", + "content": "Discussion. Building upon the duplex diffusion networks, we divide the inference recovery process into three distinct stages: global structure generation, modality transfer, and detail refinement. At inference (see Fig. 2(b)), since only the available modality " + }, + { + "bbox": [ + 313, + 363, + 555, + 604 + ], + "type": "inline_equation", + "content": "x^{a}" + }, + { + "bbox": [ + 313, + 363, + 555, + 604 + ], + "type": "text", + "content": " is observed, we utilise the generation direction from " + }, + { + "bbox": [ + 313, + 363, + 555, + 604 + ], + "type": "inline_equation", + "content": "x^{a}" + }, + { + "bbox": [ + 313, + 363, + 555, + 604 + ], + "type": "text", + "content": " to the missing modality " + }, + { + "bbox": [ + 313, + 363, + 555, + 604 + ], + "type": "inline_equation", + "content": "x^{m}" + }, + { + "bbox": [ + 313, + 363, + 555, + 604 + ], + "type": "text", + "content": ". (1) Global structure generation stage " + }, + { + "bbox": [ + 313, + 363, + 555, + 604 + ], + "type": "inline_equation", + "content": "(t \\in [T, t_{2}))" + }, + { + "bbox": [ + 313, + 363, + 555, + 604 + ], + "type": "text", + "content": ": As illustrated in Fig. 
2(c), this stage begins by sampling Gaussian noise " + }, + { + "bbox": [ + 313, + 363, + 555, + 604 + ], + "type": "inline_equation", + "content": "\\tilde{z}_{T}" + }, + { + "bbox": [ + 313, + 363, + 555, + 604 + ], + "type": "text", + "content": " and generating the coarse structural representation of the missing modality " + }, + { + "bbox": [ + 313, + 363, + 555, + 604 + ], + "type": "inline_equation", + "content": "x^{m}" + }, + { + "bbox": [ + 313, + 363, + 555, + 604 + ], + "type": "text", + "content": " by predicting its latent feature " + }, + { + "bbox": [ + 313, + 363, + 555, + 604 + ], + "type": "inline_equation", + "content": "\\tilde{x}_{t}^{a}" + }, + { + "bbox": [ + 313, + 363, + 555, + 604 + ], + "type": "text", + "content": ". (2) Modality transfer stage " + }, + { + "bbox": [ + 313, + 363, + 555, + 604 + ], + "type": "inline_equation", + "content": "(t \\in [t_{2}, t_{1}))" + }, + { + "bbox": [ + 313, + 363, + 555, + 604 + ], + "type": "text", + "content": ": Here, modality transformation is performed by gradually converting the noised latent feature " + }, + { + "bbox": [ + 313, + 363, + 555, + 604 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{x}}_{t_{2}}" + }, + { + "bbox": [ + 313, + 363, + 555, + 604 + ], + "type": "text", + "content": " into " + }, + { + "bbox": [ + 313, + 363, + 555, + 604 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{x}}_{t_{1}}" + }, + { + "bbox": [ + 313, + 363, + 555, + 604 + ], + "type": "text", + "content": " of the target missing modality via the conditional score function " + }, + { + "bbox": [ + 313, + 363, + 555, + 604 + ], + "type": "inline_equation", + "content": "s_{\\theta}^{(m)}(\\tilde{\\mathbf{x}}_{t}^{m}, y, t)" + }, + { + "bbox": [ + 313, + 363, + 555, + 604 + ], + "type": "text", + "content": ". This process integrates cross-modal knowledge, enabling effective semantic transfer between modalities. 
(3) Detail refinement stage " + }, + { + "bbox": [ + 313, + 363, + 555, + 604 + ], + "type": "inline_equation", + "content": "(t \\in [t_{1}, 0))" + }, + { + "bbox": [ + 313, + 363, + 555, + 604 + ], + "type": "text", + "content": ": Finally, fine-grained details of the generated missing modality are refined by predicting " + }, + { + "bbox": [ + 313, + 363, + 555, + 604 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_{0}^{m}" + }, + { + "bbox": [ + 313, + 363, + 555, + 604 + ], + "type": "text", + "content": " from the noisy latent feature " + }, + { + "bbox": [ + 313, + 363, + 555, + 604 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{x}}_{t_{1}}^{m}" + }, + { + "bbox": [ + 313, + 363, + 555, + 604 + ], + "type": "text", + "content": ", enhancing the quality and realism of the reconstructed data." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 313, + 612, + 499, + 624 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 612, + 499, + 624 + ], + "spans": [ + { + "bbox": [ + 313, + 612, + 499, + 624 + ], + "type": "text", + "content": "3.4. Multimodal Fusion and Prediction" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 313, + 629, + 553, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 629, + 553, + 677 + ], + "spans": [ + { + "bbox": [ + 313, + 629, + 553, + 677 + ], + "type": "text", + "content": "For any missing pattern, the set of recovered data is denoted as " + }, + { + "bbox": [ + 313, + 629, + 553, + 677 + ], + "type": "inline_equation", + "content": "\\hat{\\mathcal{X}}^{\\mathrm{miss}}" + }, + { + "bbox": [ + 313, + 629, + 553, + 677 + ], + "type": "text", + "content": ", while the available data is represented as " + }, + { + "bbox": [ + 313, + 629, + 553, + 677 + ], + "type": "inline_equation", + "content": "\\mathcal{X}^{\\mathrm{ava}}" + }, + { + "bbox": [ + 313, + 629, + 553, + 677 + ], + "type": "text", + "content": ". 
These are combined to form the complete multimodal input for downstream fusion and prediction tasks." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 313, + 677, + 553, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 677, + 553, + 714 + ], + "spans": [ + { + "bbox": [ + 313, + 677, + 553, + 714 + ], + "type": "text", + "content": "We employ multimodal Transformers [46] to fuse the features from " + }, + { + "bbox": [ + 313, + 677, + 553, + 714 + ], + "type": "inline_equation", + "content": "\\hat{\\mathcal{X}}^{\\mathrm{miss}}\\cup \\mathcal{X}^{\\mathrm{ava}}" + }, + { + "bbox": [ + 313, + 677, + 553, + 714 + ], + "type": "text", + "content": ". The resulting fused representation is subsequently passed through multi-layer perceptrs" + } + ] + } + ], + "index": 26 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "24511" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 59, + 70, + 555, + 321 + ], + "blocks": [ + { + "bbox": [ + 59, + 70, + 555, + 321 + ], + "lines": [ + { + "bbox": [ + 59, + 70, + 555, + 321 + ], + "spans": [ + { + "bbox": [ + 59, + 70, + 555, + 321 + ], + "type": "table", + "html": "
Model{L}{A}{V}{L,A}{L,V}{A,V}
CMU-MOSIDCCA [3]73.6 / 73.8 / 30.250.5 / 46.1 / 16.347.7 / 41.5 / 16.674.7 / 74.8 / 29.774.9 / 75.0 / 30.350.8 / 46.4 / 16.6
DCCAE [50]76.4 / 76.5 / 28.348.8 / 42.1 / 16.952.6 / 51.1 / 17.177.0 / 77.0 / 30.276.7 / 76.8 / 30.054.0 / 52.5 / 17.4
MCTN [40]79.1 / 79.2 / 41.056.1 / 54.5 / 16.555.0 / 54.4 / 16.381.0 / 81.0 / 43.281.1 / 81.2 / 42.157.5 / 57.4 / 16.8
TransM [53]80.1 / 80.0 / 41.255.2 / 55.0 / 15.255.8 / 55.8 / 16.282.2 / 82.3 / 43.982.1 / 82.1 / 42.058.1 / 58.0 / 17.2
ICDN [63]83.1 / 83.2 / 42.055.5 / 55.4 / 15.056.7 / 56.7 / 16.183.1 / 83.1 / 43.382.9 / 83.0 / 42.159.3 / 59.3 / 17.2
MMIN [65]83.8 / 83.8 / 41.655.3 / 51.5 / 15.557.0 / 54.0 / 15.584.0 / 84.0 / 42.383.8 / 83.9 / 42.060.4 / 58.5 / 19.5
GCNet [31]83.7 / 83.6 / 42.356.1 / 54.5 / 16.656.1 / 55.7 / 16.984.5 / 84.4 / 43.484.3 / 84.2 / 43.462.0 / 61.9 / 17.2
DiCMoR [51]84.5 / 84.4 / 44.360.5 / 60.8 / 20.962.2 / 60.2 / 20.985.5 / 85.5 / 44.685.5 / 85.4 / 45.264.0 / 63.5 / 21.9
IMDer [52]84.8 / 84.7 / 44.862.0 / 62.2 / 22.061.3 / 60.8 / 22.285.4 / 85.3 / 45.085.5 / 85.4 / 45.363.6 / 63.4 / 23.8
MD2N87.4 / 87.3 / 45.968.8 / 68.8 / 27.567.1 / 67.0 / 27.287.4 / 87.4 / 45.687.5 / 87.4 / 46.070.4 / 70.4 / 28.3
CMU-MOSEIDCCA [3]78.5 / 78.7 / 46.762.0 / 50.2 / 41.161.9 / 55.7 / 41.379.5 / 79.2 / 46.780.3 / 79.7 / 46.663.4 / 56.9 / 41.5
DCCAE [50]79.7 / 79.5 / 47.061.4 / 53.8 / 40.961.1 / 57.2 / 40.180.0 / 80.0 / 47.480.4 / 80.4 / 47.162.7 / 59.2 / 41.6
MCTN [40]82.6 / 82.8 / 50.262.7 / 54.5 / 41.562.6 / 57.1 / 41.683.5 / 83.3 / 50.783.2 / 83.2 / 50.463.7 / 62.7 / 42.1
TransM [53]82.3 / 82.3 / 49.660.2 / 57.1 / 40.060.8 / 59.6 / 41.283.6 / 83.3 / 51.183.4 / 83.3 / 50.064.0 / 63.3 / 41.9
ICDN [63]82.7 / 83.2 / 50.058.5 / 58.4 / 40.161.7 / 61.2 / 40.984.0 / 83.9 / 51.483.7 / 83.7 / 50.963.3 / 60.7 / 40.6
MMIN [65]82.3 / 82.4 / 51.458.9 / 59.5 / 40.459.3 / 60.0 / 40.783.7 / 83.7 / 52.083.8 / 83.4 / 51.263.5 / 61.9 / 41.8
GCNet [31]83.0 / 83.2 / 51.260.2 / 60.3 / 41.161.9 / 61.6 / 41.784.3 / 84.4 / 51.384.3 / 84.4 / 51.164.1 / 57.2 / 42.0
DiCMoR [51]84.2 / 84.3 / 52.462.9 / 60.4 / 41.463.6 / 63.6 / 42.085.0 / 84.9 / 52.784.9 / 84.9 / 53.065.2 / 64.4 / 42.4
IMDer [52]84.5 / 84.5 / 52.563.8 / 60.6 / 41.763.9 / 63.6 / 42.685.1 / 85.1 / 53.185.0 / 85.0 / 53.164.9 / 63.5 / 42.8
MD2N88.4 / 88.3 / 55.269.7 / 69.7 / 43.570.1 / 70.0 / 44.788.5 / 88.4 / 56.588.3 / 88.4 / 56.171.2 / 70.6 / 44.6
", + "image_path": "dc0a413c0d0ba1155c64984d5da52eabbbe6e91f5f0d28a919e70abcf93acda3.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 328, + 555, + 352 + ], + "lines": [ + { + "bbox": [ + 55, + 328, + 555, + 352 + ], + "spans": [ + { + "bbox": [ + 55, + 328, + 555, + 352 + ], + "type": "text", + "content": "Table 1. Comparison with the state-of-the-arts on CMU-MOSI [59] and CMU-MOSEI [60] under fixed missing scenario. " + }, + { + "bbox": [ + 55, + 328, + 555, + 352 + ], + "type": "inline_equation", + "content": "\\{K\\}" + }, + { + "bbox": [ + 55, + 328, + 555, + 352 + ], + "type": "text", + "content": " means modality " + }, + { + "bbox": [ + 55, + 328, + 555, + 352 + ], + "type": "inline_equation", + "content": "\\{\\ast\\}" + }, + { + "bbox": [ + 55, + 328, + 555, + 352 + ], + "type": "text", + "content": " is available (" + }, + { + "bbox": [ + 55, + 328, + 555, + 352 + ], + "type": "inline_equation", + "content": "\\ast \\in \\{\\mathbf{L}, \\mathbf{A}, \\mathbf{V}\\}" + }, + { + "bbox": [ + 55, + 328, + 555, + 352 + ], + "type": "text", + "content": "). The values in each cell denote " + }, + { + "bbox": [ + 55, + 328, + 555, + 352 + ], + "type": "inline_equation", + "content": "\\mathrm{ACC}_2 / \\mathrm{F}_1 / \\mathrm{ACC}_7" + }, + { + "bbox": [ + 55, + 328, + 555, + 352 + ], + "type": "text", + "content": ". **Bold** is the best." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 55, + 371, + 295, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 371, + 295, + 396 + ], + "spans": [ + { + "bbox": [ + 55, + 371, + 295, + 396 + ], + "type": "text", + "content": "(MLPs) to produce the final predictions. 
The overall optimization objective is defined as:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 129, + 406, + 294, + 419 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 406, + 294, + 419 + ], + "spans": [ + { + "bbox": [ + 129, + 406, + 294, + 419 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {t o t a l}} = \\mathcal {L} _ {\\text {t a s k}} + \\gamma \\mathcal {L} _ {\\text {r e c}}, \\tag {17}", + "image_path": "961fe71d4bdfac0c46005cce446570ddbb6ff49ffceece3349ffc2077f15183c.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 428, + 296, + 513 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 428, + 296, + 513 + ], + "spans": [ + { + "bbox": [ + 55, + 428, + 296, + 513 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 55, + 428, + 296, + 513 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{task}}" + }, + { + "bbox": [ + 55, + 428, + 296, + 513 + ], + "type": "text", + "content": " denotes the task-specific loss, implemented as cross-entropy loss in our experiments, and " + }, + { + "bbox": [ + 55, + 428, + 296, + 513 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 55, + 428, + 296, + 513 + ], + "type": "text", + "content": " is a balancing coefficient that controls the relative importance of the reconstruction loss " + }, + { + "bbox": [ + 55, + 428, + 296, + 513 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{rec}}" + }, + { + "bbox": [ + 55, + 428, + 296, + 513 + ], + "type": "text", + "content": ". The entire optimization is conducted in an end-to-end manner. Detailed training configurations, including modality missing rate settings, are provided in the experimental section." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 524, + 137, + 538 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 524, + 137, + 538 + ], + "spans": [ + { + "bbox": [ + 55, + 524, + 137, + 538 + ], + "type": "text", + "content": "4. Experiments" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 544, + 296, + 579 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 544, + 296, + 579 + ], + "spans": [ + { + "bbox": [ + 55, + 544, + 296, + 579 + ], + "type": "text", + "content": "In this section, we conduct extensive experiments on the missing modality multimodal learning and a suite of ablation studies." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 588, + 251, + 601 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 588, + 251, + 601 + ], + "spans": [ + { + "bbox": [ + 55, + 588, + 251, + 601 + ], + "type": "text", + "content": "4.1. Datasets and Implementation Details" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 605, + 295, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 605, + 295, + 689 + ], + "spans": [ + { + "bbox": [ + 55, + 605, + 295, + 689 + ], + "type": "text", + "content": "Datasets. To verify the effectiveness of " + }, + { + "bbox": [ + 55, + 605, + 295, + 689 + ], + "type": "inline_equation", + "content": "\\mathrm{MD}^2\\mathrm{M}" + }, + { + "bbox": [ + 55, + 605, + 295, + 689 + ], + "type": "text", + "content": ", we conduct experiments on two multimodal sentiment analysis datasets: CMU-MOSI [59] and CMU-MOSEI [60]. Each sample is labeled with a sentiment score ranging from -3 (strongly negative) to +3 (strongly positive). 
We evaluate the performance using the following metrics: 7-class accuracy " + }, + { + "bbox": [ + 55, + 605, + 295, + 689 + ], + "type": "inline_equation", + "content": "(\\mathrm{ACC}_7)" + }, + { + "bbox": [ + 55, + 605, + 295, + 689 + ], + "type": "text", + "content": ", binary accuracy " + }, + { + "bbox": [ + 55, + 605, + 295, + 689 + ], + "type": "inline_equation", + "content": "(\\mathrm{ACC}_2)" + }, + { + "bbox": [ + 55, + 605, + 295, + 689 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 55, + 605, + 295, + 689 + ], + "type": "inline_equation", + "content": "\\mathrm{F}_1" + }, + { + "bbox": [ + 55, + 605, + 295, + 689 + ], + "type": "text", + "content": " score." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 55, + 689, + 296, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 689, + 296, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 689, + 296, + 714 + ], + "type": "text", + "content": "Baseline. We compare " + }, + { + "bbox": [ + 55, + 689, + 296, + 714 + ], + "type": "inline_equation", + "content": "\\mathrm{MD}^2\\mathrm{M}" + }, + { + "bbox": [ + 55, + 689, + 296, + 714 + ], + "type": "text", + "content": " with several state-of-the-art incomplete multimodal learning methods, including" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 371, + 555, + 408 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 371, + 555, + 408 + ], + "spans": [ + { + "bbox": [ + 313, + 371, + 555, + 408 + ], + "type": "text", + "content": "recovery-based methods (MCTN [40], TransM [53], ICDN [63], MMIN [65], GCNet [31], DiCMoR [51], IMDer [52]) and non-recovery methods (DCCA [3], DCCAE [50])." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 411, + 556, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 411, + 556, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 411, + 556, + 715 + ], + "type": "text", + "content": "Implementation Details. 
On the two datasets, we extract the text features via pre-trained BERT model text[10] and obtain a 768-dimensional hidden state as the word features. For visual modality, each video frame was encoded via Facet [22] to represent the presence of the total 35 facial action units [9]. The acoustic modality was processed by COVAREP [9] to obtain the 74-dimensional features. Each experiment was run five times, and the average results on the test set are reported, using PyTorch on an NVIDIA A800 GPU. We explore the effectiveness of various methods in two distinct scenarios: one where a specific modality is consistently missing, and another where the missing modality is randomly selected. For the fixed missing modality scenario, we systematically discard either one modality (i.e. " + }, + { + "bbox": [ + 313, + 411, + 556, + 715 + ], + "type": "inline_equation", + "content": "\\{L,A\\}" + }, + { + "bbox": [ + 313, + 411, + 556, + 715 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 313, + 411, + 556, + 715 + ], + "type": "inline_equation", + "content": "\\{L,V\\}" + }, + { + "bbox": [ + 313, + 411, + 556, + 715 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 313, + 411, + 556, + 715 + ], + "type": "inline_equation", + "content": "\\{A,V\\}" + }, + { + "bbox": [ + 313, + 411, + 556, + 715 + ], + "type": "text", + "content": ") or two modalities (i.e. 
" + }, + { + "bbox": [ + 313, + 411, + 556, + 715 + ], + "type": "inline_equation", + "content": "\\{L\\}" + }, + { + "bbox": [ + 313, + 411, + 556, + 715 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 313, + 411, + 556, + 715 + ], + "type": "inline_equation", + "content": "\\{A\\}" + }, + { + "bbox": [ + 313, + 411, + 556, + 715 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 313, + 411, + 556, + 715 + ], + "type": "inline_equation", + "content": "\\{A\\}" + }, + { + "bbox": [ + 313, + 411, + 556, + 715 + ], + "type": "text", + "content": ") throughout the evaluation. For the random missing scenario, we define the missing rate " + }, + { + "bbox": [ + 313, + 411, + 556, + 715 + ], + "type": "inline_equation", + "content": "r_{\\mathrm{miss}} = (1 - \\frac{\\sum_{i=1}^{N} m_i}{N \\times M}) \\times 100\\%" + }, + { + "bbox": [ + 313, + 411, + 556, + 715 + ], + "type": "text", + "content": " to quantify the overall extent of missing modalities across the samples, where " + }, + { + "bbox": [ + 313, + 411, + 556, + 715 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 313, + 411, + 556, + 715 + ], + "type": "text", + "content": " denotes the total number of modalities, " + }, + { + "bbox": [ + 313, + 411, + 556, + 715 + ], + "type": "inline_equation", + "content": "m_i" + }, + { + "bbox": [ + 313, + 411, + 556, + 715 + ], + "type": "text", + "content": " represents the number of available modalities for " + }, + { + "bbox": [ + 313, + 411, + 556, + 715 + ], + "type": "inline_equation", + "content": "i^{th}" + }, + { + "bbox": [ + 313, + 411, + 556, + 715 + ], + "type": "text", + "content": " sample and " + }, + { + "bbox": [ + 313, + 411, + 556, + 715 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 313, + 411, + 556, + 715 + ], + "type": "text", + "content": " corresponds to the total number of modalities. 
In the case of three modalities, we select eight values of " + }, + { + "bbox": [ + 313, + 411, + 556, + 715 + ], + "type": "inline_equation", + "content": "r_{\\mathrm{miss}}" + }, + { + "bbox": [ + 313, + 411, + 556, + 715 + ], + "type": "text", + "content": " from the range " + }, + { + "bbox": [ + 313, + 411, + 556, + 715 + ], + "type": "inline_equation", + "content": "[0\\%, 10\\%, 20\\%, \\ldots, 70\\%]" + }, + { + "bbox": [ + 313, + 411, + 556, + 715 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 313, + 411, + 556, + 715 + ], + "type": "inline_equation", + "content": "70\\%" + }, + { + "bbox": [ + 313, + 411, + 556, + 715 + ], + "type": "text", + "content": " represents the max approximate missing rate while ensuring that at least" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "24512" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 60, + 71, + 555, + 163 + ], + "blocks": [ + { + "bbox": [ + 60, + 71, + 555, + 163 + ], + "lines": [ + { + "bbox": [ + 60, + 71, + 555, + 163 + ], + "spans": [ + { + "bbox": [ + 60, + 71, + 555, + 163 + ], + "type": "image", + "image_path": "d67fba6002d69b2ce8c4d4e225909d315f03892ef8e75ad38457a3513c52cd67.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 107, + 175, + 501, + 186 + ], + "lines": [ + { + "bbox": [ + 107, + 175, + 501, + 186 + ], + "spans": [ + { + "bbox": [ + 107, + 175, + 501, + 186 + ], + "type": "text", + "content": "Figure 3. Comparison results on CMU-MOSI [59] and CMU-MOSEI [60] under randomly missing scenario." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 209, + 232, + 220 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 209, + 232, + 220 + ], + "spans": [ + { + "bbox": [ + 55, + 209, + 232, + 220 + ], + "type": "text", + "content": "one modality is available at any given time." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 227, + 254, + 239 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 227, + 254, + 239 + ], + "spans": [ + { + "bbox": [ + 55, + 227, + 254, + 239 + ], + "type": "text", + "content": "4.2. Comparison with the state-of-the-arts" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 244, + 296, + 280 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 244, + 296, + 280 + ], + "spans": [ + { + "bbox": [ + 55, + 244, + 296, + 280 + ], + "type": "text", + "content": "Table 1 and Fig. 3 present the quantitative results of our model across both datasets. From the results, we could make the following key observations:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 281, + 296, + 483 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 55, + 281, + 296, + 340 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 281, + 296, + 340 + ], + "spans": [ + { + "bbox": [ + 55, + 281, + 296, + 340 + ], + "type": "text", + "content": "(1) Effectiveness of recovery-based models: Recovery-based methods, such as our " + }, + { + "bbox": [ + 55, + 281, + 296, + 340 + ], + "type": "inline_equation", + "content": "\\mathrm{MD}^2\\mathrm{N}" + }, + { + "bbox": [ + 55, + 281, + 296, + 340 + ], + "type": "text", + "content": " model, consistently outperform non-recovery approaches [3, 50]. This improvement is possibly attributed to their ability to utilize missing modality data more effectively." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 340, + 296, + 412 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 340, + 296, + 412 + ], + "spans": [ + { + "bbox": [ + 55, + 340, + 296, + 412 + ], + "type": "text", + "content": "(2) State-of-the-art performance: Among all recovery-based methods [31, 40, 53, 63, 65], our model achieves the best overall performance. We attribute this to its dual cross-diffusion structure, which dynamically controls noise and integrates cross-modal semantic information, preserving global consistency while enhancing local detail fidelity." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 412, + 296, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 412, + 296, + 483 + ], + "spans": [ + { + "bbox": [ + 55, + 412, + 296, + 483 + ], + "type": "text", + "content": "(3) Robustness to missing patterns and rates: As shown in Tab. 1 and Fig. 3, our model exhibits smaller performance degradation under increasing missing rates compared to other recovery methods, indicating its strong robustness across different missing patterns and levels of modality incompleteness." + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 55, + 491, + 149, + 504 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 491, + 149, + 504 + ], + "spans": [ + { + "bbox": [ + 55, + 491, + 149, + 504 + ], + "type": "text", + "content": "4.3. Ablation Study" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 55, + 508, + 295, + 630 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 508, + 295, + 630 + ], + "spans": [ + { + "bbox": [ + 55, + 508, + 295, + 630 + ], + "type": "text", + "content": "Impact of Different Configurations. 
To validate the effectiveness of different configurations, we ablate our " + }, + { + "bbox": [ + 55, + 508, + 295, + 630 + ], + "type": "inline_equation", + "content": "\\mathrm{MD}^2\\mathrm{N}" + }, + { + "bbox": [ + 55, + 508, + 295, + 630 + ], + "type": "text", + "content": " into three variants: (i) Base-model: Direct recovery from the available modality to the missing modality, similar to IMDer [52]. (ii) Adding VP-SDE Model: Using the VP-SDE technique for a score model, optimising time-step noise control to predict the missing modality " + }, + { + "bbox": [ + 55, + 508, + 295, + 630 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_0^m" + }, + { + "bbox": [ + 55, + 508, + 295, + 630 + ], + "type": "text", + "content": ". (iii) Adding Multi-stage Model: Incorporating VP-SDE within a score model " + }, + { + "bbox": [ + 55, + 508, + 295, + 630 + ], + "type": "inline_equation", + "content": "s_{\\theta}^{(m)}" + }, + { + "bbox": [ + 55, + 508, + 295, + 630 + ], + "type": "text", + "content": " and adopting a multi-stage diffusion process without duplex modeling." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 55, + 630, + 296, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 630, + 296, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 630, + 296, + 713 + ], + "type": "text", + "content": "As shown in Tab. 2, we make the following observations: (i) vs. (ii): Optimising time-step noise control in the VP-SDE model significantly improves the accuracy of missing modality predictions compared to the base model. (ii) vs. (iii): Adding a multi-stage model outperforms the one only using the VP-SDE model, indicating that the multi-stage diffusion process better captures cross-modal interactions." 
+ } + ] + } + ], + "index": 11 + }, + { + "type": "table", + "bbox": [ + 326, + 206, + 543, + 322 + ], + "blocks": [ + { + "bbox": [ + 326, + 206, + 543, + 322 + ], + "lines": [ + { + "bbox": [ + 326, + 206, + 543, + 322 + ], + "spans": [ + { + "bbox": [ + 326, + 206, + 543, + 322 + ], + "type": "table", + "html": "
DatasetMethod TypeResults
CMU-MOSI(i) Base-model76.5 / 73.4 / 35.2
(ii) + VP-SDE76.7 / 76.6 / 35.3
(iii) + Multi-stage77.1 / 77.1 / 36.0
(iv) Ours80.0 / 80.0 / 40.2
CMU-MOSEI(i) Base-model79.0 / 77.3 / 49.3
(ii) + VP-SDE79.4 / 78.0 / 49.5
(iii) + Multi-stage79.9 / 78.4 / 49.8
(iv) Ours82.6 / 82.6 / 50.3
", + "image_path": "2669b7bdd651ea2eb6dc35966fbca1abbf1e9fab049859ea5332db98442de0c3.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "table_body" + } + ], + "index": 12 + }, + { + "type": "table", + "bbox": [ + 324, + 390, + 545, + 439 + ], + "blocks": [ + { + "bbox": [ + 313, + 331, + 555, + 375 + ], + "lines": [ + { + "bbox": [ + 313, + 331, + 555, + 375 + ], + "spans": [ + { + "bbox": [ + 313, + 331, + 555, + 375 + ], + "type": "text", + "content": "Table 2. Ablation study on various configurations. The average results for missing rates ranging from " + }, + { + "bbox": [ + 313, + 331, + 555, + 375 + ], + "type": "inline_equation", + "content": "30\\%" + }, + { + "bbox": [ + 313, + 331, + 555, + 375 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 313, + 331, + 555, + 375 + ], + "type": "inline_equation", + "content": "70\\%" + }, + { + "bbox": [ + 313, + 331, + 555, + 375 + ], + "type": "text", + "content": " are shown in the cells, representing " + }, + { + "bbox": [ + 313, + 331, + 555, + 375 + ], + "type": "inline_equation", + "content": "\\mathrm{ACC}_2 / \\mathrm{F}_1 / \\mathrm{ACC}_7" + }, + { + "bbox": [ + 313, + 331, + 555, + 375 + ], + "type": "text", + "content": ". Bold indicates the best performance." + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 324, + 390, + 545, + 439 + ], + "lines": [ + { + "bbox": [ + 324, + 390, + 545, + 439 + ], + "spans": [ + { + "bbox": [ + 324, + 390, + 545, + 439 + ], + "type": "table", + "html": "
MethodsCMU-MOSICMU-MOSEI
MCTN71.3 / 71.2 / 35.576.9 / 76.2 / 47.4
MCTN w/s(m,i)θi75.7 / 75.6 / 38.479.0 / 79.1 / 48.5
", + "image_path": "b6385507c8e9b238c6b9cfa6c307fd2002b42491319b4d278e820b7467e063ae.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "table_body" + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 447, + 555, + 480 + ], + "lines": [ + { + "bbox": [ + 313, + 447, + 555, + 480 + ], + "spans": [ + { + "bbox": [ + 313, + 447, + 555, + 480 + ], + "type": "text", + "content": "Table 3. Ablation study of multi-stage duplex diffusion network " + }, + { + "bbox": [ + 313, + 447, + 555, + 480 + ], + "type": "inline_equation", + "content": "{s}_{{\\theta }_{i}}^{\\left( m,i\\right) }" + }, + { + "bbox": [ + 313, + 447, + 555, + 480 + ], + "type": "text", + "content": " on MCTN[40] under 30% missing rate. The value in each cell denotes " + }, + { + "bbox": [ + 313, + 447, + 555, + 480 + ], + "type": "inline_equation", + "content": "{\\mathrm{{ACC}}}_{2}/{\\mathrm{F}}_{1}/{\\mathrm{{ACC}}}_{7}" + }, + { + "bbox": [ + 313, + 447, + 555, + 480 + ], + "type": "text", + "content": " . Bold is best." + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 313, + 506, + 554, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 506, + 554, + 578 + ], + "spans": [ + { + "bbox": [ + 313, + 506, + 554, + 578 + ], + "type": "text", + "content": "(iii) vs. (iv): Our full model achieves the best performance across all metrics on both datasets, benefiting from the additional guidance of the duplex diffusion structure. This result demonstrates the effectiveness of our duplex training strategy in enhancing multimodal recovery and learning robust modality-invariant representations." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 580, + 555, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 580, + 555, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 580, + 555, + 713 + ], + "type": "text", + "content": "Effects of Multi-stage Duplex Diffused Network. 
To further evaluate the generalizability and effectiveness of our proposed multi-stage duplex diffusion network " + }, + { + "bbox": [ + 313, + 580, + 555, + 713 + ], + "type": "inline_equation", + "content": "(\\mathrm{MD}^2\\mathrm{N})" + }, + { + "bbox": [ + 313, + 580, + 555, + 713 + ], + "type": "text", + "content": ", we integrate it into the MCTN [40]. As shown in Tab. 3, incorporating " + }, + { + "bbox": [ + 313, + 580, + 555, + 713 + ], + "type": "inline_equation", + "content": "\\mathrm{MD}^2\\mathrm{N}" + }, + { + "bbox": [ + 313, + 580, + 555, + 713 + ], + "type": "text", + "content": " into MCTN (denoted as MCTN " + }, + { + "bbox": [ + 313, + 580, + 555, + 713 + ], + "type": "inline_equation", + "content": "\\mathrm{w / s}_{\\theta_i}^{(m,i)}" + }, + { + "bbox": [ + 313, + 580, + 555, + 713 + ], + "type": "text", + "content": ") consistently outperforms the original MCTN [40] on both datasets, achieving approximately a 2-point performance improvement. These results demonstrate that our proposed module can be seamlessly integrated into existing models, providing consistent gains and highlighting its broad applicability and effectiveness." 
+ } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "24513" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 57, + 70, + 294, + 157 + ], + "blocks": [ + { + "bbox": [ + 57, + 70, + 294, + 157 + ], + "lines": [ + { + "bbox": [ + 57, + 70, + 294, + 157 + ], + "spans": [ + { + "bbox": [ + 57, + 70, + 294, + 157 + ], + "type": "image", + "image_path": "768c7b846dc0fba4028d5bc6ae870baeb10466c0b0234c502b68ec2ac0ba5f35.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 165, + 296, + 211 + ], + "lines": [ + { + "bbox": [ + 55, + 165, + 296, + 211 + ], + "spans": [ + { + "bbox": [ + 55, + 165, + 296, + 211 + ], + "type": "text", + "content": "Figure 4. Ablation study on the effect of varying " + }, + { + "bbox": [ + 55, + 165, + 296, + 211 + ], + "type": "inline_equation", + "content": "t_1" + }, + { + "bbox": [ + 55, + 165, + 296, + 211 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 55, + 165, + 296, + 211 + ], + "type": "inline_equation", + "content": "t_2" + }, + { + "bbox": [ + 55, + 165, + 296, + 211 + ], + "type": "text", + "content": " under " + }, + { + "bbox": [ + 55, + 165, + 296, + 211 + ], + "type": "inline_equation", + "content": "30\\%" + }, + { + "bbox": [ + 55, + 165, + 296, + 211 + ], + "type": "text", + "content": " missing rate on MOSEI. 
(a) shows the impact of changing " + }, + { + "bbox": [ + 55, + 165, + 296, + 211 + ], + "type": "inline_equation", + "content": "t_1" + }, + { + "bbox": [ + 55, + 165, + 296, + 211 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 55, + 165, + 296, + 211 + ], + "type": "inline_equation", + "content": "t_2 = 800" + }, + { + "bbox": [ + 55, + 165, + 296, + 211 + ], + "type": "text", + "content": ", and (b) shows the effect of changing " + }, + { + "bbox": [ + 55, + 165, + 296, + 211 + ], + "type": "inline_equation", + "content": "t_2" + }, + { + "bbox": [ + 55, + 165, + 296, + 211 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 55, + 165, + 296, + 211 + ], + "type": "inline_equation", + "content": "t_1 = 200" + }, + { + "bbox": [ + 55, + 165, + 296, + 211 + ], + "type": "text", + "content": ". Metrics: " + }, + { + "bbox": [ + 55, + 165, + 296, + 211 + ], + "type": "inline_equation", + "content": "\\mathrm{ACC}_2" + }, + { + "bbox": [ + 55, + 165, + 296, + 211 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 55, + 165, + 296, + 211 + ], + "type": "inline_equation", + "content": "\\mathrm{F}_1" + }, + { + "bbox": [ + 55, + 165, + 296, + 211 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 55, + 165, + 296, + 211 + ], + "type": "inline_equation", + "content": "\\mathrm{ACC}_7" + }, + { + "bbox": [ + 55, + 165, + 296, + 211 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 54, + 232, + 295, + 434 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 232, + 295, + 434 + ], + "spans": [ + { + "bbox": [ + 54, + 232, + 295, + 434 + ], + "type": "text", + "content": "Effects of the Multi-Stage Process Configuration. 
We investigate the impact of varying " + }, + { + "bbox": [ + 54, + 232, + 295, + 434 + ], + "type": "inline_equation", + "content": "t_1" + }, + { + "bbox": [ + 54, + 232, + 295, + 434 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 54, + 232, + 295, + 434 + ], + "type": "inline_equation", + "content": "t_2" + }, + { + "bbox": [ + 54, + 232, + 295, + 434 + ], + "type": "text", + "content": " on model performance. When discretizing the time into 1000 steps, the configuration " + }, + { + "bbox": [ + 54, + 232, + 295, + 434 + ], + "type": "inline_equation", + "content": "t_1 = 200" + }, + { + "bbox": [ + 54, + 232, + 295, + 434 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 54, + 232, + 295, + 434 + ], + "type": "inline_equation", + "content": "t_2 = 800" + }, + { + "bbox": [ + 54, + 232, + 295, + 434 + ], + "type": "text", + "content": " achieves the best overall results across all evaluation metrics, as illustrated in Fig. 4. Specifically, in experiments where " + }, + { + "bbox": [ + 54, + 232, + 295, + 434 + ], + "type": "inline_equation", + "content": "t_2" + }, + { + "bbox": [ + 54, + 232, + 295, + 434 + ], + "type": "text", + "content": " is fixed at 800 and " + }, + { + "bbox": [ + 54, + 232, + 295, + 434 + ], + "type": "inline_equation", + "content": "t_1" + }, + { + "bbox": [ + 54, + 232, + 295, + 434 + ], + "type": "text", + "content": " is varied (Fig. 4(a)), we observe that as " + }, + { + "bbox": [ + 54, + 232, + 295, + 434 + ], + "type": "inline_equation", + "content": "t_1" + }, + { + "bbox": [ + 54, + 232, + 295, + 434 + ], + "type": "text", + "content": " decreases from 200 to 20, performance slightly declines. This degradation is attributed to the detailed refinement stage becoming too brief to sufficiently modify modality details. 
Conversely, when " + }, + { + "bbox": [ + 54, + 232, + 295, + 434 + ], + "type": "inline_equation", + "content": "t_1" + }, + { + "bbox": [ + 54, + 232, + 295, + 434 + ], + "type": "text", + "content": " increases beyond an optimal point, the conditional fusion stage shortens, which also leads to a drop in performance. Furthermore, as shown in Fig. 4(b), when " + }, + { + "bbox": [ + 54, + 232, + 295, + 434 + ], + "type": "inline_equation", + "content": "t_1" + }, + { + "bbox": [ + 54, + 232, + 295, + 434 + ], + "type": "text", + "content": " is fixed at 200 and " + }, + { + "bbox": [ + 54, + 232, + 295, + 434 + ], + "type": "inline_equation", + "content": "t_2" + }, + { + "bbox": [ + 54, + 232, + 295, + 434 + ], + "type": "text", + "content": " is varied, a longer conditional fusion stage consistently yields better results. These findings highlight the importance of balancing the durations of both stages to maximize overall model effectiveness." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 436, + 295, + 520 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 436, + 295, + 520 + ], + "spans": [ + { + "bbox": [ + 55, + 436, + 295, + 520 + ], + "type": "text", + "content": "Effects of the Multimodal Feature Extractor and Decoder. To evaluate the contributions of the multimodal feature extractor " + }, + { + "bbox": [ + 55, + 436, + 295, + 520 + ], + "type": "inline_equation", + "content": "\\mathcal{E}_K" + }, + { + "bbox": [ + 55, + 436, + 295, + 520 + ], + "type": "text", + "content": " and the decoder " + }, + { + "bbox": [ + 55, + 436, + 295, + 520 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_K" + }, + { + "bbox": [ + 55, + 436, + 295, + 520 + ], + "type": "text", + "content": ", we conduct comparative experiments using four model variants. 
All configurations employ the full multi-stage duplex diffusion network, differing only in whether they include " + }, + { + "bbox": [ + 55, + 436, + 295, + 520 + ], + "type": "inline_equation", + "content": "\\mathcal{E}_K" + }, + { + "bbox": [ + 55, + 436, + 295, + 520 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 55, + 436, + 295, + 520 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_K" + }, + { + "bbox": [ + 55, + 436, + 295, + 520 + ], + "type": "text", + "content": ". The results in Tab. 4 lead to the following observations." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 521, + 295, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 521, + 295, + 617 + ], + "spans": [ + { + "bbox": [ + 55, + 521, + 295, + 617 + ], + "type": "text", + "content": "Using both the feature extractor and decoder consistently yields the strongest performance on both CMU-MOSI and CMU-MOSEI, providing an improvement of approximately 2-5 points across all evaluation metrics compared to configurations lacking either component. This demonstrates that " + }, + { + "bbox": [ + 55, + 521, + 295, + 617 + ], + "type": "inline_equation", + "content": "\\mathcal{E}_K" + }, + { + "bbox": [ + 55, + 521, + 295, + 617 + ], + "type": "text", + "content": " effectively extracts modality-specific information from text, image, and audio inputs, while " + }, + { + "bbox": [ + 55, + 521, + 295, + 617 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_K" + }, + { + "bbox": [ + 55, + 521, + 295, + 617 + ], + "type": "text", + "content": " enables accurate reconstruction of the original data." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 618, + 295, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 618, + 295, + 715 + ], + "spans": [ + { + "bbox": [ + 55, + 618, + 295, + 715 + ], + "type": "text", + "content": "Excluding the feature extractor leads to a moderate performance drop of about 2 points, indicating its crucial role in mapping multimodal signals into a unified latent space and supporting robust cross-modal representation learning. Without " + }, + { + "bbox": [ + 55, + 618, + 295, + 715 + ], + "type": "inline_equation", + "content": "\\mathcal{E}_K" + }, + { + "bbox": [ + 55, + 618, + 295, + 715 + ], + "type": "text", + "content": ", the model struggles to capture complementary cues across modalities. Similarly, removing the decoder results in a comparable performance decrease of around 3 points, highlighting its importance for reconstructing the" + } + ] + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 340, + 70, + 531, + 192 + ], + "blocks": [ + { + "bbox": [ + 340, + 70, + 531, + 192 + ], + "lines": [ + { + "bbox": [ + 340, + 70, + 531, + 192 + ], + "spans": [ + { + "bbox": [ + 340, + 70, + 531, + 192 + ], + "type": "table", + "html": "
DatasetsE_KD_KResults
CMU-MOSI83.4 / 83.4 / 42.9
X81.2 / 81.2 / 41.0
X80.4 / 80.2 / 40.4
XX78.5 / 78.5 / 37.3
CMU-MOSEI84.3 / 84.4 / 52.6
X82.4 / 82.2 / 50.2
X81.3 / 81.1 / 49.4
XX79.4 / 79.6 / 47.3
", + "image_path": "113eb6d97ba30b783f6ab6369bc9746fd3e3c9767b9b41ffc6260beade9cba8b.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 201, + 553, + 234 + ], + "lines": [ + { + "bbox": [ + 313, + 201, + 553, + 234 + ], + "spans": [ + { + "bbox": [ + 313, + 201, + 553, + 234 + ], + "type": "text", + "content": "Table 4. Ablation study on the effects of the multimodal feature extractor and decoder under " + }, + { + "bbox": [ + 313, + 201, + 553, + 234 + ], + "type": "inline_equation", + "content": "30\\%" + }, + { + "bbox": [ + 313, + 201, + 553, + 234 + ], + "type": "text", + "content": " missing rate on MOSI ans MOSEI. The value in each cell denotes " + }, + { + "bbox": [ + 313, + 201, + 553, + 234 + ], + "type": "inline_equation", + "content": "\\mathrm{ACC}_2 / \\mathrm{F}_1 / \\mathrm{ACC}_7" + }, + { + "bbox": [ + 313, + 201, + 553, + 234 + ], + "type": "text", + "content": ". Bold is best." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 313, + 257, + 553, + 293 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 257, + 553, + 293 + ], + "spans": [ + { + "bbox": [ + 313, + 257, + 553, + 293 + ], + "type": "text", + "content": "original data from intermediate representations. The decoder ensures reliable recovery by optimizing the reconstruction that combines scoring and decoding losses." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 294, + 555, + 366 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 294, + 555, + 366 + ], + "spans": [ + { + "bbox": [ + 313, + 294, + 555, + 366 + ], + "type": "text", + "content": "Finally, removing both modules causes the most substantial performance degradation, with a drop of roughly 5-7 points across datasets. 
This confirms that both components are critical for robust multimodal representation learning and for recovering missing modalities, especially under incomplete input conditions." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 380, + 388, + 392 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 380, + 388, + 392 + ], + "spans": [ + { + "bbox": [ + 313, + 380, + 388, + 392 + ], + "type": "text", + "content": "5. Conclusion" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 401, + 555, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 401, + 555, + 605 + ], + "spans": [ + { + "bbox": [ + 313, + 401, + 555, + 605 + ], + "type": "text", + "content": "In this paper, we tackled the challenge of modality generation bias in multimodal learning for missing modality recovery. Existing diffusion-based approaches often struggle to balance generation quality across modalities due to inherent modality gaps. To address this issue, we proposed the Multi-Stage Duplex Diffusion Network " + }, + { + "bbox": [ + 313, + 401, + 555, + 605 + ], + "type": "inline_equation", + "content": "(\\mathbf{MD}^2\\mathbf{N})" + }, + { + "bbox": [ + 313, + 401, + 555, + 605 + ], + "type": "text", + "content": ", which introduces a modality transfer module to enable smooth and unbiased cross-modal generation. By leveraging a duplex diffusion framework consisting of three progressive stages—global structure generation, modality transfer, and local cross-modal refinement—our method facilitates mutual influence between available and missing modalities, resulting in a more balanced and effective recovery process. 
Extensive experiments demonstrate that " + }, + { + "bbox": [ + 313, + 401, + 555, + 605 + ], + "type": "inline_equation", + "content": "\\mathrm{MD}^2\\mathrm{N}" + }, + { + "bbox": [ + 313, + 401, + 555, + 605 + ], + "type": "text", + "content": " substantially outperforms state-of-the-art methods, confirming its effectiveness in reducing modality generation bias and enhancing multimodal learning under missing modalities." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 606, + 556, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 606, + 556, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 606, + 556, + 715 + ], + "type": "text", + "content": "In future work, we plan to extend our framework to handle scenarios involving more than three modalities simultaneously and explore adaptive stage configurations to automatically adjust the diffusion process based on input modality availability. Additionally, we aim to evaluate " + }, + { + "bbox": [ + 313, + 606, + 556, + 715 + ], + "type": "inline_equation", + "content": "\\mathrm{MD}^2\\mathrm{N}" + }, + { + "bbox": [ + 313, + 606, + 556, + 715 + ], + "type": "text", + "content": " in real-world downstream tasks such as multimodal sentiment analysis under partial observation and cross-modal retrieval with missing modality conditions to further validate its generalizability and practical impact." 
+ } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "24514" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 72, + 153, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 72, + 153, + 85 + ], + "spans": [ + { + "bbox": [ + 56, + 72, + 153, + 85 + ], + "type": "text", + "content": "Acknowledgments" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 91, + 296, + 187 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 91, + 296, + 187 + ], + "spans": [ + { + "bbox": [ + 55, + 91, + 296, + 187 + ], + "type": "text", + "content": "This research was partially supported by the National Natural Science Foundation of China (NSFC) (granted No. 62306064), the Central-Guided Local Science and Technology Development (granted No. 2023ZYD0165), and Sichuan Science and Technology Program (granted No. 2024ZDZX0011). We appreciate all the authors for their fruitful discussions. In addition, thanks are extended to anonymous reviewers for their insightful comments." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 198, + 115, + 211 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 198, + 115, + 211 + ], + "spans": [ + { + "bbox": [ + 56, + 198, + 115, + 211 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 57, + 219, + 296, + 712 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 61, + 219, + 296, + 273 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 219, + 296, + 273 + ], + "spans": [ + { + "bbox": [ + 61, + 219, + 296, + 273 + ], + "type": "text", + "content": "[1] Sharmeen M Saleem Abdullah Abdullah, Siddeeq Y Ameen Ameen, Mohammed AM Sadeeq, and Subhi Zeebaree. Multimodal emotion recognition using deep learning. Journal of Applied Science and Technology Trends, 2(01):73-79, 2021. 1" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 61, + 274, + 296, + 306 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 274, + 296, + 306 + ], + "spans": [ + { + "bbox": [ + 61, + 274, + 296, + 306 + ], + "type": "text", + "content": "[2] Brian DO Anderson. Reverse-time diffusion equation models. Stochastic Processes and their Applications, 12(3):313-326, 1982. 3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 62, + 308, + 294, + 352 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 308, + 294, + 352 + ], + "spans": [ + { + "bbox": [ + 62, + 308, + 294, + 352 + ], + "type": "text", + "content": "[3] Galen Andrew, Raman Arora, Jeff Bilmes, and Karen Livescu. Deep canonical correlation analysis. In International conference on machine learning, pages 1247-1255. PMLR, 2013. 
1, 6, 7" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 62, + 354, + 295, + 397 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 354, + 295, + 397 + ], + "spans": [ + { + "bbox": [ + 62, + 354, + 295, + 397 + ], + "type": "text", + "content": "[4] Tadas Baltrusaitis, Chaitanya Ahuja, and Louis-Philippe Morency. Multimodal machine learning: A survey and taxonomy. IEEE transactions on pattern analysis and machine intelligence, 41(2):423-443, 2018. 1" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 62, + 399, + 295, + 442 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 399, + 295, + 442 + ], + "spans": [ + { + "bbox": [ + 62, + 399, + 295, + 442 + ], + "type": "text", + "content": "[5] Georgios Batzolis, Jan Stanczuk, Carola-Bibiane Schonlieb, and Christian Etmann. Conditional image generation with score-based diffusion models. arXiv preprint arXiv:2111.13606, 2021. 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 62, + 445, + 295, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 445, + 295, + 498 + ], + "spans": [ + { + "bbox": [ + 62, + 445, + 295, + 498 + ], + "type": "text", + "content": "[6] Filipe Betzel, Karen Khatamifard, Harini Suresh, David J Lilja, John Sartori, and Ulya Karpuzcu. Approximate communication: Techniques for reducing communication bottlenecks in large-scale parallel systems. ACM Computing Surveys (CSUR), 51(1):1-32, 2018. 1" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 62, + 500, + 295, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 500, + 295, + 555 + ], + "spans": [ + { + "bbox": [ + 62, + 500, + 295, + 555 + ], + "type": "text", + "content": "[7] Lei Cai, Zhengyang Wang, Hongyang Gao, Dinggang Shen, and Shuiwang Ji. Deep adversarial learning for multimodality missing data completion. 
In Proceedings of the 24th ACM SIGKDD international conference on knowledge discovery & data mining, pages 1158-1166, 2018. 1, 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 62, + 556, + 295, + 600 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 556, + 295, + 600 + ], + "spans": [ + { + "bbox": [ + 62, + 556, + 295, + 600 + ], + "type": "text", + "content": "[8] Cheuk-Yiu Chan, Wan-Chi Siu, Yuk-Hee Chan, and H Anthony Chan. Anlightendiff: Anchoring diffusion probabilistic model on low light image enhancement. IEEE Transactions on Image Processing, 2024. 3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 62, + 601, + 295, + 656 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 601, + 295, + 656 + ], + "spans": [ + { + "bbox": [ + 62, + 601, + 295, + 656 + ], + "type": "text", + "content": "[9] Gilles Degottex, John Kane, Thomas Drugman, Tuomo Raitio, and Stefan Scherer. Covarep—a collaborative voice analysis repository for speech technologies. In 2014 IEEE international conference on acoustics, speech and signal processing (icassp), pages 960-964. IEEE, 2014. 4, 6" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 57, + 658, + 295, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 658, + 295, + 689 + ], + "spans": [ + { + "bbox": [ + 57, + 658, + 295, + 689 + ], + "type": "text", + "content": "[10] Jacob Devlin. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805, 2018. 4, 6" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 57, + 691, + 295, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 691, + 295, + 712 + ], + "spans": [ + { + "bbox": [ + 57, + 691, + 295, + 712 + ], + "type": "text", + "content": "[11] Craig K Enders. Applied missing data analysis. Guilford Publications, 2022. 
2" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 73, + 554, + 712 + ], + "type": "list", + "angle": 0, + "index": 27, + "blocks": [ + { + "bbox": [ + 316, + 73, + 554, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 73, + 554, + 139 + ], + "spans": [ + { + "bbox": [ + 316, + 73, + 554, + 139 + ], + "type": "text", + "content": "[12] Bin Fu, Fanghua Yu, Anran Liu, Zixuan Wang, Jie Wen, Junjun He, and Yu Qiao. Generate like experts: Multi-stage font generation by incorporating font transfer process into diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6892-6901, 2024. 2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 316, + 141, + 554, + 196 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 141, + 554, + 196 + ], + "spans": [ + { + "bbox": [ + 316, + 141, + 554, + 196 + ], + "type": "text", + "content": "[13] Wei Han, Hui Chen, Alexander Gelbukh, Amir Zadeh, Louis-philippe Morency, and Soujanya Poria. Bi-bimodal modality fusion for correlation-controlled multimodal sentiment analysis. In Proceedings of the 2021 international conference on multimodal interaction, pages 6–15, 2021. 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 317, + 198, + 553, + 253 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 198, + 553, + 253 + ], + "spans": [ + { + "bbox": [ + 317, + 198, + 553, + 253 + ], + "type": "text", + "content": "[14] Haoyang He, Jiangning Zhang, Hongxu Chen, Xuhai Chen, Zhishan Li, Xu Chen, Yabiao Wang, Chengjie Wang, and Lei Xie. A diffusion-based framework for multi-class anomaly detection. In Proceedings of the AAAI conference on artificial intelligence, pages 8472-8480, 2024. 
2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 317, + 256, + 554, + 311 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 256, + 554, + 311 + ], + "spans": [ + { + "bbox": [ + 317, + 256, + 554, + 311 + ], + "type": "text", + "content": "[15] Tao He, Yuan-Fang Li, Lianli Gao, Dongxiang Zhang, and Jingkuan Song. One network for multi-domains: domain adaptive hashing with intersectant generative adversarial networks. In Proceedings of the 28th International Joint Conference on Artificial Intelligence, pages 2477-2483, 2019. 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 317, + 313, + 553, + 346 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 313, + 553, + 346 + ], + "spans": [ + { + "bbox": [ + 317, + 313, + 553, + 346 + ], + "type": "text", + "content": "[16] Tao He, Lianli Gao, Jingkuan Song, Jianfei Cai, and Yuan-Fang Li. Semantic compositional learning for low-shot scene graph generation. arXiv preprint arXiv:2108.08600, 2021. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 317, + 348, + 553, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 348, + 553, + 392 + ], + "spans": [ + { + "bbox": [ + 317, + 348, + 553, + 392 + ], + "type": "text", + "content": "[17] Tao He, Lianli Gao, Jingkuan Song, and Yuan-Fang Li. Semisupervised network embedding with differentiable deep quantization. IEEE Transactions on Neural Networks and Learning Systems, 34(8):4791-4802, 2021. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 317, + 395, + 553, + 437 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 395, + 553, + 437 + ], + "spans": [ + { + "bbox": [ + 317, + 395, + 553, + 437 + ], + "type": "text", + "content": "[18] Tao He, Lianli Gao, Jingkuan Song, and Yuan-Fang Li. State-aware compositional learning toward unbiased training for scene graph generation. IEEE Transactions on Image Processing, 32:43-56, 2022. 
2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 317, + 441, + 553, + 484 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 441, + 553, + 484 + ], + "spans": [ + { + "bbox": [ + 317, + 441, + 553, + 484 + ], + "type": "text", + "content": "[19] Tao He, Lianli Gao, Jingkuan Song, and Yuan-Fang Li. Transferable and differentiable discrete network embedding for multi-domains with hierarchical knowledge distillation. Information Sciences, 629:520-532, 2023. 2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 317, + 487, + 553, + 540 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 487, + 553, + 540 + ], + "spans": [ + { + "bbox": [ + 317, + 487, + 553, + 540 + ], + "type": "text", + "content": "[20] Trong Nghia Hoang, Shenda Hong, Cao Xiao, Bryan Low, and Jimeng Sun. Aid: Active distillation machine to leverage pre-trained black-box models in private data settings. In Proceedings of the Web Conference 2021, pages 3569-3581, 2021. 1" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 317, + 544, + 553, + 597 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 544, + 553, + 597 + ], + "spans": [ + { + "bbox": [ + 317, + 544, + 553, + 597 + ], + "type": "text", + "content": "[21] Brandon Huang, Chancharik Mitra, Leonid Karlinsky, Assaf Arbelle, Trevor Darrell, and Roei Herzig. Multimodal task vectors enable many-shot multimodal in-context learning. Advances in Neural Information Processing Systems, 37:22124-22153, 2025. 2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 317, + 601, + 553, + 645 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 601, + 553, + 645 + ], + "spans": [ + { + "bbox": [ + 317, + 601, + 553, + 645 + ], + "type": "text", + "content": "[22] iMotions. Facial expression analysis. Website, 2017. 
https://imotions.com/products/imotions-lab/modules/fea-facial-expression-analysis/.4,6" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 317, + 647, + 553, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 647, + 553, + 712 + ], + "spans": [ + { + "bbox": [ + 317, + 647, + 553, + 712 + ], + "type": "text", + "content": "[23] Jong-Hwan Jang, Junggu Choi, Hyun Woong Roh, Sang Joon Son, Chang Hyung Hong, Eun Young Kim, Tae Young Kim, Dukyong Yoon, et al. Deep learning approach for imputation of missing values in actigraphy data: Algorithm development study. JMIR mHealth and uHealth, 8(7):e16113, 2020. 2" + } + ] + } + ], + "index": 26 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "24515" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 72, + 297, + 713 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 56, + 72, + 297, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 72, + 297, + 106 + ], + "spans": [ + { + "bbox": [ + 56, + 72, + 297, + 106 + ], + "type": "text", + "content": "[24] Jaehyeong Jo and Sung Ju Hwang. Continuous diffusion model for language modeling. arXiv preprint arXiv:2502.11564, 2025.3" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 108, + 295, + 140 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 108, + 295, + 140 + ], + "spans": [ + { + "bbox": [ + 56, + 108, + 295, + 140 + ], + "type": "text", + "content": "[25] Vahid Kazemi and Ali Elqursh. Show, ask, attend, and answer: A strong baseline for visual question answering. 
arXiv preprint arXiv:1704.03162, 2017. 1" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 143, + 294, + 196 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 143, + 294, + 196 + ], + "spans": [ + { + "bbox": [ + 56, + 143, + 294, + 196 + ], + "type": "text", + "content": "[26] Amirhossein Kazerouni, Ehsan Khodapanah Aghdam, Moein Heidari, Reza Azad, Mohsen Fayyaz, Ilker Hacihaliloglu, and Dorit Merhof. Diffusion models in medical imaging: A comprehensive survey. Medical Image Analysis, 88:102846, 2023. 1" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 199, + 294, + 243 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 199, + 294, + 243 + ], + "spans": [ + { + "bbox": [ + 56, + 199, + 294, + 243 + ], + "type": "text", + "content": "[27] Aghiles Kebaili, Jérôme Lapuyade-Lahorgue, Pierre Vera, and Su Ruan. Amm-diff: Adaptive multi-modality diffusion network for missing modality imputation. arXiv preprint arXiv:2501.12840, 2025. 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 245, + 294, + 289 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 245, + 294, + 289 + ], + "spans": [ + { + "bbox": [ + 56, + 245, + 294, + 289 + ], + "type": "text", + "content": "[28] Linchao Li, Bowen Du, Yonggang Wang, Lingqiao Qin, and Huachun Tan. Estimation of missing values in heterogeneous traffic data: Application of multimodal deep learning model. Knowledge-Based Systems, 194:105592, 2020. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 291, + 294, + 334 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 291, + 294, + 334 + ], + "spans": [ + { + "bbox": [ + 56, + 291, + 294, + 334 + ], + "type": "text", + "content": "[29] Wenbo Li, Xin Yu, Kun Zhou, Yibing Song, Zhe Lin, and Jiaya Jia. Image inpainting via iteratively decoupled probabilistic modeling. 
arXiv preprint arXiv:2212.02963, 2022.3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 337, + 294, + 381 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 337, + 294, + 381 + ], + "spans": [ + { + "bbox": [ + 56, + 337, + 294, + 381 + ], + "type": "text", + "content": "[30] Xin Li, Yulin Ren, Xin Jin, Cuiling Lan, Xingrui Wang, Wenjun Zeng, Xinchao Wang, and Zhibo Chen. Diffusion models for image restoration and enhancement-a comprehensive survey. arXiv preprint arXiv:2308.09388, 2023. 1" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 383, + 294, + 437 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 383, + 294, + 437 + ], + "spans": [ + { + "bbox": [ + 56, + 383, + 294, + 437 + ], + "type": "text", + "content": "[31] Zheng Lian, Lan Chen, Licai Sun, Bin Liu, and Jianhua Tao. Gcnet: Graph completion network for incomplete multimodal learning in conversation. IEEE Transactions on pattern analysis and machine intelligence, 45(7):8419-8432, 2023. 6, 7" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 440, + 294, + 483 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 440, + 294, + 483 + ], + "spans": [ + { + "bbox": [ + 56, + 440, + 294, + 483 + ], + "type": "text", + "content": "[32] Gong-Xu Liu, Ling-Feng Shi, and Dong-Jin Xin. Data integrity monitoring method of digital sensors for internet-of-things applications. IEEE Internet of Things Journal, 7(5): 4575-4584, 2020. 1" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 485, + 294, + 552 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 485, + 294, + 552 + ], + "spans": [ + { + "bbox": [ + 56, + 485, + 294, + 552 + ], + "type": "text", + "content": "[33] Yubo Liu, Han Li, Qiaoming Deng, and Kai Hu. Diffusion probabilistic model assisted 3d form finding and design latent space exploration: A case study for taihu stone spacial transformation. 
In The International Conference on Computational Design and Robotic Fabrication, pages 11-23. Springer, 2023. 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 553, + 294, + 597 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 553, + 294, + 597 + ], + "spans": [ + { + "bbox": [ + 56, + 553, + 294, + 597 + ], + "type": "text", + "content": "[34] Mengmeng Ma, Jian Ren, Long Zhao, Sergey Tulyakov, Cathy Wu, and Xi Peng. Smil: Multimodal learning with severely missing modality. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 2302-2310, 2021. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 600, + 294, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 600, + 294, + 632 + ], + "spans": [ + { + "bbox": [ + 56, + 600, + 294, + 632 + ], + "type": "text", + "content": "[35] Xuerong Mao. The truncated euler-maruyama method for stochastic differential equations. Journal of Computational and Applied Mathematics, 290:370-384, 2015. 3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 56, + 635, + 294, + 678 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 635, + 294, + 678 + ], + "spans": [ + { + "bbox": [ + 56, + 635, + 294, + 678 + ], + "type": "text", + "content": "[36] Chenlin Meng, Yutong He, Yang Song, Jiaming Song, Jia-jun Wu, Jun-Yan Zhu, and Stefano Ermon. Sdedit: Guided image synthesis and editing with stochastic differential equations. arXiv preprint arXiv:2108.01073, 2021. 3" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 56, + 680, + 294, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 680, + 294, + 713 + ], + "spans": [ + { + "bbox": [ + 56, + 680, + 294, + 713 + ], + "type": "text", + "content": "[37] Jiquan Ngiam, Aditya Khosla, Mingyu Kim, Juhan Nam, Honglak Lee, Andrew Y Ng, et al. Multimodal deep learning. In ICML, pages 689-696, 2011. 
2" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 72, + 555, + 712 + ], + "type": "list", + "angle": 0, + "index": 28, + "blocks": [ + { + "bbox": [ + 316, + 72, + 553, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 72, + 553, + 116 + ], + "spans": [ + { + "bbox": [ + 316, + 72, + 553, + 116 + ], + "type": "text", + "content": "[38] Jianmo Ni, Larry Muhlstein, and Julian McAuley. Modeling heart rate and activity data for personalized fitness recommendation. In The World Wide Web Conference, pages 1343-1353, 2019. 2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 316, + 118, + 553, + 173 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 118, + 553, + 173 + ], + "spans": [ + { + "bbox": [ + 316, + 118, + 553, + 173 + ], + "type": "text", + "content": "[39] Yongsheng Pan, Mingxia Liu, Yong Xia, and Dinggang Shen. Disease-image-specific learning for diagnosis-oriented neuroimage synthesis with incomplete multimodality data. IEEE transactions on pattern analysis and machine intelligence, 44(10):6839-6853, 2021. 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 316, + 175, + 553, + 229 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 175, + 553, + 229 + ], + "spans": [ + { + "bbox": [ + 316, + 175, + 553, + 229 + ], + "type": "text", + "content": "[40] Hai Pham, Paul Pu Liang, Thomas Manzini, Louis-Philippe Morency, and Barnabás Póczos. Found in translation: Learning robust joint representations by cyclic translations between modalities. In Proceedings of the AAAI conference on artificial intelligence, pages 6892–6899, 2019. 
1, 4, 6, 7" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 316, + 231, + 553, + 296 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 231, + 553, + 296 + ], + "spans": [ + { + "bbox": [ + 316, + 231, + 553, + 296 + ], + "type": "text", + "content": "[41] Ludan Ruan, Yiyang Ma, Huan Yang, Huiguo He, Bei Liu, Jianlong Fu, Nicholas Jing Yuan, Qin Jin, and Baining Guo. Mm-diffusion: Learning multi-modal diffusion models for joint audio and video generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10219-10228, 2023. 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 316, + 297, + 553, + 331 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 297, + 553, + 331 + ], + "spans": [ + { + "bbox": [ + 316, + 297, + 553, + 331 + ], + "type": "text", + "content": "[42] Kaveh Samiee and Péter Kovács. Ecg decomposition using cascaded spline projection residual auto encoders. In 2023 Computing in Cardiology (CinC), pages 1-4. IEEE, 2023. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 316, + 332, + 553, + 364 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 332, + 553, + 364 + ], + "spans": [ + { + "bbox": [ + 316, + 332, + 553, + 364 + ], + "type": "text", + "content": "[43] Yang Song and Stefano Ermon. Generative modeling by estimating gradients of the data distribution. Advances in neural information processing systems, 32, 2019. 2, 3" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 316, + 365, + 553, + 409 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 365, + 553, + 409 + ], + "spans": [ + { + "bbox": [ + 316, + 365, + 553, + 409 + ], + "type": "text", + "content": "[44] Yang Song, Jascha Sohl-Dickstein, Diederik P Kingma, Abhishek Kumar, Stefano Ermon, and Ben Poole. Score-based generative modeling through stochastic differential equations. arXiv preprint arXiv:2011.13456, 2020. 
3" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 316, + 411, + 555, + 454 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 411, + 555, + 454 + ], + "spans": [ + { + "bbox": [ + 316, + 411, + 555, + 454 + ], + "type": "text", + "content": "[45] Yan Tai, Weichen Fan, Zhao Zhang, and Ziwei Liu. Link-context learning for multimodal llms. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 27176-27185, 2024. 2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 316, + 456, + 553, + 521 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 456, + 553, + 521 + ], + "spans": [ + { + "bbox": [ + 316, + 456, + 553, + 521 + ], + "type": "text", + "content": "[46] Yao-Hung Hubert Tsai, Shaojie Bai, Paul Pu Liang, J Zico Kolter, Louis-Philippe Morency, and Ruslan Salakhutdinov. Multimodal transformer for unaligned multimodal language sequences. In Proceedings of the conference. Association for computational linguistics. Meeting, page 6558. NIH Public Access, 2019. 5" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 316, + 523, + 553, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 523, + 553, + 567 + ], + "spans": [ + { + "bbox": [ + 316, + 523, + 553, + 567 + ], + "type": "text", + "content": "[47] Ali Vosoughi, Shijian Deng, Songyang Zhang, Yapeng Tian, Chenliang Xu, and Jiebo Luo. Cross modality bias in visual question answering: A causal view with possible worlds vqa. IEEE Transactions on Multimedia, 2024. 1" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 316, + 568, + 553, + 610 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 568, + 553, + 610 + ], + "spans": [ + { + "bbox": [ + 316, + 568, + 553, + 610 + ], + "type": "text", + "content": "[48] Jingjing Wang, Dan Zhang, and Feng Luo. Unified directly denoising for both variance preserving and variance exploding diffusion models. 
arXiv preprint arXiv:2405.21059, 2024.3" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 316, + 613, + 553, + 668 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 613, + 553, + 668 + ], + "spans": [ + { + "bbox": [ + 316, + 613, + 553, + 668 + ], + "type": "text", + "content": "[49] Qi Wang, Liang Zhan, Paul Thompson, and Jiayu Zhou. Multimodal learning with incomplete modalities by knowledge distillation. In Proceedings of the 26th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining, pages 1828-1838, 2020. 2" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 316, + 670, + 553, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 670, + 553, + 712 + ], + "spans": [ + { + "bbox": [ + 316, + 670, + 553, + 712 + ], + "type": "text", + "content": "[50] Weiran Wang, Raman Arora, Karen Livescu, and Jeff Bilmes. On deep multi-view representation learning. In International conference on machine learning, pages 1083-1092. PMLR, 2015. 1, 6, 7" + } + ] + } + ], + "index": 27 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "text", + "content": "24516" + } + ] + } + ], + "index": 29 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 72, + 294, + 712 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 56, + 72, + 294, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 72, + 294, + 127 + ], + "spans": [ + { + "bbox": [ + 56, + 72, + 294, + 127 + ], + "type": "text", + "content": "[51] Yuanzhi Wang, Zhen Cui, and Yong Li. Distribution-consistent modal recovering for incomplete multimodal learning. 
In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 22025-22034, 2023. 4, 6" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 129, + 294, + 173 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 129, + 294, + 173 + ], + "spans": [ + { + "bbox": [ + 56, + 129, + 294, + 173 + ], + "type": "text", + "content": "[52] Yuanzhi Wang, Yong Li, and Zhen Cui. Incomplete multimodality-diffused emotion recognition. Advances in Neural Information Processing Systems, 36:17117-17128, 2023. 1, 2, 4, 6, 7" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 175, + 294, + 218 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 175, + 294, + 218 + ], + "spans": [ + { + "bbox": [ + 56, + 175, + 294, + 218 + ], + "type": "text", + "content": "[53] Zilong Wang, Zhaohong Wan, and Xiaojun Wan. Transmodality: An end2end fusion method with transformer for multimodal sentiment analysis. In Proceedings of the web conference 2020, pages 2514-2520, 2020. 4, 6, 7" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 220, + 294, + 273 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 220, + 294, + 273 + ], + "spans": [ + { + "bbox": [ + 56, + 220, + 294, + 273 + ], + "type": "text", + "content": "[54] Zeyu Wang, Jingyu Lin, Yifei Qian, Yi Huang, Shicen Tian, Bosong Chai, Juncan Deng, Lan Du, Cunjian Chen, Yufei Guo, et al. Diffx: Guide your layout to cross-modal generative modeling. arXiv preprint arXiv:2407.15488, 2024.3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 275, + 294, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 275, + 294, + 308 + ], + "spans": [ + { + "bbox": [ + 56, + 275, + 294, + 308 + ], + "type": "text", + "content": "[55] Renjie Wu, Hu Wang, and Hsiang-Ting Chen. A comprehensive survey on deep multimodal learning with missing modality. arXiv preprint arXiv:2409.07825, 2024. 
2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 310, + 294, + 331 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 310, + 294, + 331 + ], + "spans": [ + { + "bbox": [ + 56, + 310, + 294, + 331 + ], + "type": "text", + "content": "[56] Chang Xu, Dacheng Tao, and Chao Xu. A survey on multiview learning. arXiv preprint arXiv:1304.5634, 2013. 1" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 333, + 294, + 388 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 333, + 294, + 388 + ], + "spans": [ + { + "bbox": [ + 56, + 333, + 294, + 388 + ], + "type": "text", + "content": "[57] Dingkang Yang, Mingcheng Li, Dongling Xiao, Yang Liu, Kun Yang, Zhaoyu Chen, Yuzheng Wang, Peng Zhai, Ke Li, and Lihua Zhang. Towards multimodal sentiment analysis debiasing via bias purification. In European Conference on Computer Vision, pages 464-481. Springer, 2024. 1" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 389, + 294, + 443 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 389, + 294, + 443 + ], + "spans": [ + { + "bbox": [ + 56, + 389, + 294, + 443 + ], + "type": "text", + "content": "[58] Ling Yang, Zhilong Zhang, Zhaochen Yu, Jingwei Liu, Minkai Xu, Stefano Ermon, and CUI Bin. Cross-modal contextualized diffusion models for text-guided visual generation and editing. In The Twelfth International Conference on Learning Representations, 2024. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 445, + 294, + 488 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 445, + 294, + 488 + ], + "spans": [ + { + "bbox": [ + 56, + 445, + 294, + 488 + ], + "type": "text", + "content": "[59] Amir Zadeh, Rowan Zellers, Eli Pincus, and Louis-Philippe Morency. Multimodal sentiment intensity analysis in videos: Facial gestures and verbal messages. IEEE Intelligent Systems, 31(6):82-88, 2016. 
6, 7" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 491, + 294, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 491, + 294, + 567 + ], + "spans": [ + { + "bbox": [ + 56, + 491, + 294, + 567 + ], + "type": "text", + "content": "[60] AmirAli Bagher Zadeh, Paul Pu Liang, Soujanya Poria, Erik Cambria, and Louis-Philippe Morency. Multimodal language analysis in the wild: Cmu-mosei dataset and interpretable dynamic fusion graph. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 2236–2246, 2018. 6, 7" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 568, + 294, + 601 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 568, + 294, + 601 + ], + "spans": [ + { + "bbox": [ + 56, + 568, + 294, + 601 + ], + "type": "text", + "content": "[61] Maciej Želaszczyk and Jacek Mandyuk. Text-to-image cross-modal generation: A systematic review. arXiv preprint arXiv:2401.11631, 2024. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 602, + 294, + 656 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 602, + 294, + 656 + ], + "spans": [ + { + "bbox": [ + 56, + 602, + 294, + 656 + ], + "type": "text", + "content": "[62] Han Zhang, Jing Yu Koh, Jason Baldridge, Honglak Lee, and Yinfei Yang. Cross-modal contrastive learning for text-to-image generation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 833-842, 2021. 2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 56, + 658, + 294, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 658, + 294, + 712 + ], + "spans": [ + { + "bbox": [ + 56, + 658, + 294, + 712 + ], + "type": "text", + "content": "[63] Qiongan Zhang, Lei Shi, Peiyu Liu, Zhenfang Zhu, and Liancheng Xu. 
Retracted article: Icdn: integrating consistency and difference networks by transformer for multimodal sentiment analysis. Applied Intelligence, 53(12): 16332-16345, 2023. 6, 7" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 72, + 553, + 339 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 316, + 72, + 553, + 126 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 72, + 553, + 126 + ], + "spans": [ + { + "bbox": [ + 316, + 72, + 553, + 126 + ], + "type": "text", + "content": "[64] Zhengxin Zhang, Xiaosheng Si, Changhua Hu, and Yaguo Lei. Degradation data analysis and remaining useful life estimation: A review on wiener-process-based methods. European Journal of Operational Research, 271(3):775-796, 2018. 3" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 316, + 129, + 553, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 129, + 553, + 205 + ], + "spans": [ + { + "bbox": [ + 316, + 129, + 553, + 205 + ], + "type": "text", + "content": "[65] Jinming Zhao, Ruichen Li, and Qin Jin. Missing modality imagination network for emotion recognition with uncertain missing modalities. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 2608-2618, 2021. 4, 6, 7" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 316, + 207, + 553, + 259 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 207, + 553, + 259 + ], + "spans": [ + { + "bbox": [ + 316, + 207, + 553, + 259 + ], + "type": "text", + "content": "[66] Guanyu Zhou, Yibo Yan, Xin Zou, Kun Wang, Aiwei Liu, and Xuming Hu. Mitigating modality prior-induced hallucinations in multimodal large language models via deciphering attention causality. arXiv preprint arXiv:2410.04780, 2024. 
2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 316, + 262, + 553, + 295 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 262, + 553, + 295 + ], + "spans": [ + { + "bbox": [ + 316, + 262, + 553, + 295 + ], + "type": "text", + "content": "[67] Tongxue Zhou, Su Ruan, and Stephane Canu. A review: Deep learning for medical image segmentation using multimodality fusion. *Array*, 3:100004, 2019. 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 316, + 297, + 553, + 339 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 297, + 553, + 339 + ], + "spans": [ + { + "bbox": [ + 316, + 297, + 553, + 339 + ], + "type": "text", + "content": "[68] Tong Zhu, Leida Li, Jufeng Yang, Sicheng Zhao, Hantao Liu, and Jiansheng Qian. Multimodal sentiment analysis with image-text interaction network. IEEE transactions on multimedia, 25:3375-3385, 2022. 2" + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "24517" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file